hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f732df8d85325a052e8c49eb6f8cf957fd1c3a01 | 26,500 | py | Python | lib/googlecloudsdk/command_lib/artifacts/docker_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/command_lib/artifacts/docker_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/artifacts/docker_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for interacting with `artifacts docker` command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.artifacts import exceptions as ar_exceptions
from googlecloudsdk.api_lib.util import common_args
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.command_lib.artifacts import containeranalysis_util as ca_util
from googlecloudsdk.command_lib.artifacts import requests as ar_requests
from googlecloudsdk.command_lib.artifacts import util as ar_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
ARTIFACTREGISTRY_API_NAME = "artifactregistry"

# User-facing error message templates.
_INVALID_IMAGE_PATH_ERROR = """Invalid Docker string.
A valid Docker repository has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID
A valid image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
"""

_INVALID_DEFAULT_DOCKER_STRING_ERROR = (
    """Fail to construct Docker string from config values:
core/project: {project}, artifacts/location: {location}, artifacts/repository: {repo}
A valid Docker repository has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID
A valid image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
""")

_INVALID_IMAGE_ERROR = """Invalid Docker image.
A valid container image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
A valid container image that can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""

_INVALID_DOCKER_IMAGE_ERROR = """Invalid Docker image.
A valid container image can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""

_INVALID_DOCKER_TAG_ERROR = """Invalid Docker tag.
A valid Docker tag has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
"""

_DOCKER_IMAGE_NOT_FOUND = """Image not found.
A valid container image can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""

# Patterns for Artifact Registry Docker references. The dots in the
# "-docker.pkg.dev" domain are escaped so that hosts like
# "us-dockerXpkgYdev" are rejected (an unescaped "." matches any character).
DOCKER_REPO_REGEX = (
    r"^(?P<location>.*)-docker\.pkg\.dev\/(?P<project>[^\/]+)\/(?P<repo>[^\/]+)")

DOCKER_IMG_BY_TAG_REGEX = (
    r"^.*-docker\.pkg\.dev\/[^\/]+\/[^\/]+\/(?P<img>.*):(?P<tag>.*)")

DOCKER_IMG_BY_DIGEST_REGEX = (
    r"^.*-docker\.pkg\.dev\/[^\/]+\/[^\/]+\/(?P<img>.*)@(?P<digest>sha256:.*)")

DOCKER_IMG_REGEX = r"^.*-docker\.pkg\.dev\/[^\/]+\/[^\/]+\/(?P<img>.*)"

# Resource collection used to parse version resource names.
_VERSION_COLLECTION_NAME = "artifactregistry.projects.locations.repositories.packages.versions"
def _GetDefaultResources():
  """Builds a DockerRepo from the gcloud config defaults.

  Reads core/project, artifacts/location and artifacts/repository from the
  properties store, then validates the location.

  Raises:
    ar_exceptions.InvalidInputValueError: if any of the three config values
      is unset.

  Returns:
    A DockerRepo built from the configured defaults.
  """
  values = {
      "project": properties.VALUES.core.project.Get(),
      "location": properties.VALUES.artifacts.location.Get(),
      "repo": properties.VALUES.artifacts.repository.Get(),
  }
  if not all(values.values()):
    raise ar_exceptions.InvalidInputValueError(
        _INVALID_DEFAULT_DOCKER_STRING_ERROR.format(**values))
  ar_util.ValidateLocation(values["location"], values["project"])
  return DockerRepo(values["project"], values["location"], values["repo"])
def _ParseInput(input_str):
  """Extracts project, location and repository from a Docker string.

  Args:
    input_str: str, user input. Ex: us-docker.pkg.dev/my-proj/my-repo/my-img

  Raises:
    ar_exceptions.InvalidInputValueError: if input_str does not start with a
      valid LOCATION-docker.pkg.dev/PROJECT/REPO prefix.

  Returns:
    A DockerRepo.
  """
  match = re.match(DOCKER_REPO_REGEX, input_str)
  if match is None:
    raise ar_exceptions.InvalidInputValueError()
  return DockerRepo(
      match.group("project"), match.group("location"), match.group("repo"))
def ParseDockerImagePath(img_path):
  """Validates and parses an image path into a DockerImage or a DockerRepo.

  Falls back to the configured defaults when img_path is empty.

  Args:
    img_path: str, e.g. us-docker.pkg.dev/my-proj/my-repo[/my-img].

  Raises:
    ar_exceptions.InvalidInputValueError: if img_path is malformed.

  Returns:
    A DockerRepo for a three-part path, a DockerImage for a longer path.
  """
  if not img_path:
    return _GetDefaultResources()
  try:
    docker_repo = _ParseInput(img_path)
  except ar_exceptions.InvalidInputValueError:
    raise ar_exceptions.InvalidInputValueError(_INVALID_IMAGE_PATH_ERROR)
  ar_util.ValidateLocation(docker_repo.location, docker_repo.project)
  parts = [p for p in img_path.split("/") if p]
  if len(parts) == 3:
    return docker_repo
  if len(parts) > 3:
    return DockerImage(docker_repo, "/".join(parts[3:]))
  raise ar_exceptions.InvalidInputValueError(_INVALID_IMAGE_PATH_ERROR)
def _ParseDockerImage(img_str, err_msg):
  """Validates and parses an image string into a DockerImage.

  Tries the most specific reference form first: digest, then tag, then a
  bare image path.

  Args:
    img_str: str, User input docker formatted string.
    err_msg: str, Error message returned when img_str matches no image form.

  Raises:
    ar_exceptions.InvalidInputValueError if user input is invalid.
    ar_exceptions.UnsupportedLocationError if provided location is invalid.

  Returns:
    A DockerImage, and a DockerTag or a DockerVersion (None when img_str
    carries neither a tag nor a digest).
  """
  try:
    docker_repo = _ParseInput(img_str)
  except ar_exceptions.InvalidInputValueError:
    # NOTE(review): this raises the generic docker-image message rather than
    # the caller-supplied err_msg — confirm whether err_msg was intended here.
    raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
  ar_util.ValidateLocation(docker_repo.location, docker_repo.project)
  # Digest form: .../IMAGE@sha256:DIGEST. Checked before the tag form because
  # a digest reference also contains a ":".
  img_by_digest_match = re.match(DOCKER_IMG_BY_DIGEST_REGEX, img_str)
  if img_by_digest_match:
    docker_img = DockerImage(docker_repo, img_by_digest_match.group("img"))
    return docker_img, DockerVersion(docker_img,
                                     img_by_digest_match.group("digest"))
  # Tag form: .../IMAGE:TAG.
  img_by_tag_match = re.match(DOCKER_IMG_BY_TAG_REGEX, img_str)
  if img_by_tag_match:
    docker_img = DockerImage(docker_repo, img_by_tag_match.group("img"))
    return docker_img, DockerTag(docker_img, img_by_tag_match.group("tag"))
  # Bare image path (no tag or digest); trailing slashes are stripped.
  whole_img_match = re.match(DOCKER_IMG_REGEX, img_str)
  if whole_img_match:
    return DockerImage(docker_repo,
                       whole_img_match.group("img").strip("/")), None
  raise ar_exceptions.InvalidInputValueError(err_msg)
def _ParseDockerTag(tag):
  """Validates and parses a Docker tag string.

  Args:
    tag: str, user input Docker tag string
      (LOCATION-docker.pkg.dev/PROJECT/REPO/IMAGE:TAG).

  Raises:
    ar_exceptions.InvalidInputValueError: if the string is not a valid tag
      reference.

  Returns:
    A DockerImage and a DockerTag.
  """
  try:
    docker_repo = _ParseInput(tag)
  except ar_exceptions.InvalidInputValueError:
    raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_TAG_ERROR)
  match = re.match(DOCKER_IMG_BY_TAG_REGEX, tag)
  if not match:
    raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_TAG_ERROR)
  docker_img = DockerImage(docker_repo, match.group("img"))
  return docker_img, DockerTag(docker_img, match.group("tag"))
def _GetDockerPackagesAndVersions(docker_repo,
                                  include_tags,
                                  page_size,
                                  order_by,
                                  limit,
                                  is_nested=False):
  """Gets a list of packages with versions for a Docker repository.

  Args:
    docker_repo: DockerRepo, the repository to list.
    include_tags: bool, whether each version's tags are fetched too.
    page_size: int or None, server page size for list calls.
    order_by: str or None, server-side ordering expression.
    limit: int or None, maximum number of versions per image.
    is_nested: bool, forwarded to _GetDockerVersions so the nested-package
      fallback does not recurse.

  Raises:
    ar_exceptions.ArtifactRegistryError: if a returned package name does not
      have the expected resource-name shape.

  Returns:
    A list of version dicts (see _GetDockerVersions).
  """
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  img_list = []
  for pkg in ar_requests.ListPackages(
      client, messages, docker_repo.GetRepositoryName(), page_size=page_size):
    # Expected shape: projects/P/locations/L/repositories/R/packages/PKG,
    # i.e. exactly 8 "/"-separated parts.
    parts = pkg.name.split("/")
    if len(parts) != 8:
      raise ar_exceptions.ArtifactRegistryError(
          "Internal error. Corrupted package name: {}".format(pkg.name))
    img = DockerImage(DockerRepo(parts[1], parts[3], parts[5]), parts[7])
    img_list.extend(_GetDockerVersions(img, include_tags,
                                       page_size, order_by, limit, is_nested))
  return img_list
def _GetDockerNestedVersions(docker_img,
                             include_tags,
                             page_size,
                             order_by,
                             limit,
                             is_nested=False):
  """Lists versions of a nested Docker image by scanning the whole repository.

  Used when listing versions directly under docker_img's package came back
  empty, which happens when the image path is a prefix of a nested package
  (e.g. repo/nested1 for the package repo/nested1/nested2/image).
  """
  prefix = docker_img.GetDockerString() + "/"
  candidates = _GetDockerPackagesAndVersions(
      docker_img.docker_repo, include_tags, page_size, order_by, limit,
      is_nested)
  return [v for v in candidates if v["package"].startswith(prefix)]
def _GetDockerVersions(docker_img,
                       include_tags,
                       page_size=None,
                       order_by=None,
                       limit=None,
                       is_nested=False):
  """Gets a list of versions for a Docker image.

  Args:
    docker_img: DockerImage, the image whose versions are listed.
    include_tags: bool, when True the FULL view is requested so related tags
      come back with each version.
    page_size: int or None, server page size.
    order_by: str or None, server-side ordering expression.
    limit: int or None, maximum number of versions.
    is_nested: bool, True when called from the nested-package fallback, to
      prevent infinite recursion.

  Returns:
    A list of dicts with package/tags/version/createTime/updateTime keys.
  """
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  # BASIC view skips tag resolution; FULL view includes relatedTags.
  ver_view = (
      messages
      .ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest
      .ViewValueValuesEnum.BASIC)
  if include_tags:
    ver_view = (
        messages.
        ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest
        .ViewValueValuesEnum.FULL)
  ver_list = ar_requests.ListVersions(client, messages,
                                      docker_img.GetPackageName(), ver_view,
                                      page_size, order_by, limit)
  # If there's no result, the package name might be part of a nested package.
  # E.g. us-west1-docker.pkg.dev/fake-project/docker-repo/nested1 in
  # us-west1-docker.pkg.dev/fake-project/docker-repo/nested1/nested2/test-image
  # Try to get the list of versions through the list of all packages.
  if not ver_list and not is_nested:
    return _GetDockerNestedVersions(
        docker_img, include_tags, page_size, order_by, limit, is_nested=True)
  img_list = []
  for ver in ver_list:
    # Extract the short version id (the digest) from the resource name.
    v = resources.REGISTRY.Parse(
        ver.name, collection=_VERSION_COLLECTION_NAME).Name()
    img_list.append({
        "package": docker_img.GetDockerString(),
        "tags": ", ".join([tag.name.split("/")[-1] for tag in ver.relatedTags]),
        "version": v,
        "createTime": ver.createTime,
        "updateTime": ver.updateTime
    })
  return img_list
def _LogResourcesToDelete(docker_version, docker_tags):
  """Prints the digest and tags that are about to be deleted."""
  log.status.Print("Digests:\n- " + docker_version.GetDockerString())
  if not docker_tags:
    return
  log.status.Print("\nTags:")
  for docker_tag in docker_tags:
    log.status.Print("- " + docker_tag.GetDockerString())
def _GetDockerVersionTags(client, messages, docker_version):
  """Returns the DockerTags currently pointing at docker_version."""
  tag_resources = ar_requests.ListVersionTags(client, messages,
                                              docker_version.GetPackageName(),
                                              docker_version.GetVersionName())
  result = []
  for tag_resource in tag_resources:
    short_name = tag_resource.name.split("/")[-1]
    result.append(DockerTag(docker_version.image, short_name))
  return result
def _ValidateDockerRepo(repo_name):
  """Raises InvalidInputValueError unless repo_name is a Docker-format repo."""
  repo = ar_requests.GetRepository(repo_name)
  messages = ar_requests.GetMessages()
  docker_format = messages.Repository.FormatValueValuesEnum.DOCKER
  if repo.format != docker_format:
    raise ar_exceptions.InvalidInputValueError(
        "Invalid repository type {}. The `artifacts docker` command group can "
        "only be used on Docker repositories.".format(repo.format))
def _ValidateAndGetDockerVersion(version_or_tag):
  """Validates a version_or_tag and returns the validated DockerVersion object.

  A DockerVersion is confirmed to exist via the API; a DockerTag is resolved
  to the digest it currently points at.

  Args:
    version_or_tag: a DockerVersion or a DockerTag.

  Returns:
    a DockerVersion object.

  Raises:
    ar_exceptions.InvalidInputValueError if version_or_tag is not valid, is
      of an unexpected type, or does not exist on the server.
  """
  try:
    if isinstance(version_or_tag, DockerVersion):
      # We have all the information about the docker digest.
      # Call the API to make sure it exists.
      ar_requests.GetVersion(ar_requests.GetClient(), ar_requests.GetMessages(),
                             version_or_tag.GetVersionName())
      return version_or_tag
    elif isinstance(version_or_tag, DockerTag):
      # Resolve the tag to its current digest.
      digest = ar_requests.GetVersionFromTag(ar_requests.GetClient(),
                                             ar_requests.GetMessages(),
                                             version_or_tag.GetTagName())
      docker_version = DockerVersion(version_or_tag.image, digest)
      return docker_version
    else:
      raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
  except api_exceptions.HttpNotFoundError:
    # Map the raw 404 to a user-facing "image not found" error.
    raise ar_exceptions.InvalidInputValueError(_DOCKER_IMAGE_NOT_FOUND)
class DockerRepo(object):
  """Holder for a Docker repository.

  A valid Docker repository has the format of
  LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID

  Properties:
    project: str, The name of cloud project.
    location: str, The location of the Docker resource.
    repo: str, The name of the repository.
  """

  def __init__(self, project_id, location_id, repo_id):
    self._project = project_id
    self._location = location_id
    self._repo = repo_id

  @property
  def project(self):
    return self._project

  @property
  def location(self):
    return self._location

  @property
  def repo(self):
    return self._repo

  def __eq__(self, other):
    if isinstance(other, DockerRepo):
      return (self._project == other._project and
              self._location == other._location and
              self._repo == other._repo)
    return NotImplemented

  # Defining __eq__ alone sets __hash__ to None and makes instances
  # unhashable in Python 3; hash on the same fields __eq__ compares so
  # DockerRepos can be used in sets and as dict keys.
  def __hash__(self):
    return hash((self._project, self._location, self._repo))

  def __repr__(self):
    return "DockerRepo({!r}, {!r}, {!r})".format(
        self._project, self._location, self._repo)

  def GetDockerString(self):
    """Returns the LOCATION-docker.pkg.dev/PROJECT/REPO string."""
    return "{}-docker.pkg.dev/{}/{}".format(self.location, self.project,
                                            self.repo)

  def GetRepositoryName(self):
    """Returns the AR resource name projects/.../locations/.../repositories/...."""
    return "projects/{}/locations/{}/repositories/{}".format(
        self.project, self.location, self.repo)
class DockerImage(object):
  """Holder for a Docker image resource.

  A valid image has the format of
  LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE_PATH

  Properties:
    project: str, The name of cloud project.
    docker_repo: DockerRepo, The Docker repository.
    pkg: str, The name of the package.
  """

  def __init__(self, docker_repo, pkg_id):
    self._docker_repo = docker_repo
    self._pkg = pkg_id

  @property
  def project(self):
    return self._docker_repo.project

  @property
  def docker_repo(self):
    return self._docker_repo

  @property
  def pkg(self):
    return self._pkg

  def __eq__(self, other):
    if isinstance(other, DockerImage):
      return self._docker_repo == other._docker_repo and self._pkg == other._pkg
    return NotImplemented

  # Defining __eq__ alone would make instances unhashable in Python 3; hash
  # on the repository's identifying attributes plus the package, which is
  # consistent with __eq__.
  def __hash__(self):
    return hash((self._docker_repo.project, self._docker_repo.location,
                 self._docker_repo.repo, self._pkg))

  def __repr__(self):
    return "DockerImage({!r}, {!r})".format(self._docker_repo, self._pkg)

  def GetPackageName(self):
    """Returns the AR package resource name; "/" in the image is encoded %2F."""
    return "{}/packages/{}".format(self.docker_repo.GetRepositoryName(),
                                   self.pkg.replace("/", "%2F"))

  def GetDockerString(self):
    """Returns the pullable LOCATION-docker.pkg.dev/PROJECT/REPO/IMAGE string."""
    return "{}-docker.pkg.dev/{}/{}/{}".format(
        self.docker_repo.location,
        self.docker_repo.project,
        self.docker_repo.repo,
        self.pkg.replace("%2F", "/"))
class DockerTag(object):
  """Holder for a Docker tag.

  A valid Docker tag has the format of
  LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag

  Properties:
    image: DockerImage, The DockerImage containing the tag.
    tag: str, The name of the Docker tag.
  """

  def __init__(self, docker_img, tag_id):
    self._image = docker_img
    self._tag = tag_id

  @property
  def image(self):
    return self._image

  @property
  def tag(self):
    return self._tag

  def __eq__(self, other):
    if isinstance(other, DockerTag):
      return self._image == other._image and self._tag == other._tag
    return NotImplemented

  # Defining __eq__ alone would make instances unhashable in Python 3; derive
  # the hash from the image's Docker string plus the tag, which is consistent
  # with __eq__ (equal images produce equal Docker strings).
  def __hash__(self):
    return hash((self._image.GetDockerString(), self._tag))

  def GetTagName(self):
    """Returns the AR tag resource name <package>/tags/<tag>."""
    return "{}/tags/{}".format(self.image.GetPackageName(), self.tag)

  def GetPackageName(self):
    """Returns the AR package resource name of the containing image."""
    return self.image.GetPackageName()

  def GetDockerString(self):
    """Returns the IMAGE:tag reference string."""
    return "{}:{}".format(self.image.GetDockerString(), self.tag)
class DockerVersion(object):
  """Holder for a Docker version.

  A valid Docker version has the format of
  LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest

  Properties:
    image: DockerImage, The DockerImage containing the tag.
    digest: str, The name of the Docker digest.
    project: str, the project this image belongs to.
  """

  def __init__(self, docker_img, digest):
    self._image = docker_img
    self._digest = digest

  @property
  def image(self):
    return self._image

  @property
  def digest(self):
    return self._digest

  @property
  def project(self):
    return self._image.docker_repo.project

  def __eq__(self, other):
    if isinstance(other, DockerVersion):
      return self._image == other._image and self._digest == other._digest
    return NotImplemented

  # Defining __eq__ alone would make instances unhashable in Python 3; derive
  # the hash from the image's Docker string plus the digest, which is
  # consistent with __eq__ (equal images produce equal Docker strings).
  def __hash__(self):
    return hash((self._image.GetDockerString(), self._digest))

  def GetVersionName(self):
    """Returns the AR version resource name <package>/versions/<digest>."""
    return "{}/versions/{}".format(self.image.GetPackageName(), self.digest)

  def GetPackageName(self):
    """Returns the AR package resource name of the containing image."""
    return self.image.GetPackageName()

  def GetDockerString(self):
    """Returns the IMAGE@sha256:digest reference string."""
    return "{}@{}".format(self.image.GetDockerString(), self.digest)
def GetDockerImages(resource, args):
  """Lists Docker images or versions under the given resource.

  Args:
    resource: DockerRepo or DockerImage, the scope to list under.
    args: user input arguments (limit, filter, sort_by, include_tags,
      page_size).

  Returns:
    A list of version dicts (see _GetDockerVersions); an empty list for any
    other resource type.
  """
  limit = args.limit
  # If filter is set, we leave limiting to gcloud SDK.
  if args.filter is not None:
    limit = None
  order_by = common_args.ParseSortByArg(args.sort_by)
  # Multi-ordering is not supported yet on backend; send neither order_by
  # nor limit so the SDK sorts and limits client-side.
  if order_by is not None:
    if "," in order_by:
      order_by = None
      limit = None
  if isinstance(resource, DockerRepo):
    _ValidateDockerRepo(resource.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.project, resource.location, resource.repo))
    return _GetDockerPackagesAndVersions(resource, args.include_tags,
                                         args.page_size, order_by, limit)
  elif isinstance(resource, DockerImage):
    _ValidateDockerRepo(resource.docker_repo.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.docker_repo.project, resource.docker_repo.location,
            resource.docker_repo.repo))
    return _GetDockerVersions(resource, args.include_tags,
                              args.page_size, order_by, limit)
  return []
def WaitForOperation(operation, message):
  """Waits for the given google.longrunning.Operation to complete.

  Args:
    operation: The operation to poll.
    message: String to display for default progress_tracker.

  Raises:
    apitools.base.py.HttpError: if the request returns an HTTP error
  """
  service = ar_requests.GetClient().projects_locations_operations
  op_ref = resources.REGISTRY.ParseRelativeName(
      operation.name,
      collection="artifactregistry.projects.locations.operations")
  waiter.WaitFor(
      waiter.CloudOperationPollerNoResources(service), op_ref, message)
def DescribeDockerImage(args):
  """Retrieves information about a docker image based on the fully-qualified name.

  Args:
    args: user input arguments.

  Returns:
    A dictionary of information about the given docker image.
  """
  image, version_or_tag = _ParseDockerImage(args.IMAGE, _INVALID_IMAGE_ERROR)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  docker_version = _ValidateAndGetDockerVersion(version_or_tag)
  registry = "{}-docker.pkg.dev".format(
      docker_version.image.docker_repo.location)
  result = {
      "image_summary": {
          "digest": docker_version.digest,
          "fully_qualified_digest": docker_version.GetDockerString(),
          "registry": registry,
          "repository": docker_version.image.docker_repo.repo,
      }
  }
  metadata = ca_util.GetContainerAnalysisMetadata(docker_version, args)
  result.update(metadata.ImagesDescribeView())
  return result
def DeleteDockerImage(args):
  """Deletes a Docker digest or image.

  If input is an image, delete the image along with its resources.
  If input is an image identified by digest, delete the digest.
  If input is an image identified by tag, delete the digest and the tag.
  If --delete-tags is specified, delete all tags associated with the image
  digest.

  Args:
    args: user input arguments.

  Returns:
    The long-running operation from DeletePackage API call.
  """
  image, version_or_tag = _ParseDockerImage(args.IMAGE, _INVALID_IMAGE_ERROR)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  # No tag/digest given: the whole package (all versions and tags) goes.
  if not version_or_tag:
    console_io.PromptContinue(
        message="\nThis operation will delete all tags and images for " +
        image.GetDockerString() + ".",
        cancel_on_no=True)
    return ar_requests.DeletePackage(client, messages, image.GetPackageName())
  else:
    provided_tags = []
    docker_version = version_or_tag
    # A tag reference is resolved to its digest; remember the tag so a tagged
    # image can still be deleted when it was addressed by that single tag.
    if isinstance(version_or_tag, DockerTag):
      docker_version = DockerVersion(
          version_or_tag.image,
          ar_requests.GetVersionFromTag(client, messages,
                                        version_or_tag.GetTagName()))
      provided_tags.append(version_or_tag)
    existing_tags = _GetDockerVersionTags(client, messages, docker_version)
    # Refuse to delete a digest carrying tags beyond the one supplied unless
    # --delete-tags explicitly allows it.
    if not args.delete_tags and existing_tags != provided_tags:
      raise ar_exceptions.ArtifactRegistryError(
          "Cannot delete image {} because it is tagged. "
          "Existing tags are:\n- {}".format(
              args.IMAGE,
              "\n- ".join(tag.GetDockerString() for tag in existing_tags)))
    _LogResourcesToDelete(docker_version, existing_tags)
    console_io.PromptContinue(
        message="\nThis operation will delete the above resources.",
        cancel_on_no=True)
    # Tags must be removed before the version itself can be deleted.
    for tag in existing_tags:
      ar_requests.DeleteTag(client, messages, tag.GetTagName())
    return ar_requests.DeleteVersion(client, messages,
                                     docker_version.GetVersionName())
def GetDockerImage(image_url):
  """Fetches the Artifact Registry package behind a Docker image path.

  Args:
    image_url (str): path to a Docker image.

  Returns:
    package: Docker image package

  Throws:
    HttpNotFoundError: if repo or image path are invalid
  """
  docker_image = _ParseDockerImage(image_url, _INVALID_IMAGE_ERROR)[0]
  _ValidateDockerRepo(docker_image.docker_repo.GetRepositoryName())
  return ar_requests.GetPackage(docker_image.GetPackageName())
def AddDockerTag(args):
  """Adds a Docker tag.

  Args:
    args: user input arguments; DOCKER_IMAGE is the source image reference
      (by tag or digest) and DOCKER_TAG is the tag to create or move.

  Raises:
    ar_exceptions.InvalidInputValueError: if the source has no tag/digest or
      source and destination refer to different images.
  """
  src_image, version_or_tag = _ParseDockerImage(args.DOCKER_IMAGE,
                                                _INVALID_DOCKER_IMAGE_ERROR)
  # The source must pin a concrete version (tag or digest), not a bare image.
  if version_or_tag is None:
    raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
  dest_image, tag = _ParseDockerTag(args.DOCKER_TAG)
  # The new tag must live on the same package as the source image.
  if src_image.GetPackageName() != dest_image.GetPackageName():
    raise ar_exceptions.InvalidInputValueError(
        "Image {}\ndoes not match image {}".format(
            src_image.GetDockerString(), dest_image.GetDockerString()))
  _ValidateDockerRepo(src_image.docker_repo.GetRepositoryName())
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  docker_version = version_or_tag
  # Resolve a tag reference to the digest it currently points at.
  if isinstance(version_or_tag, DockerTag):
    docker_version = DockerVersion(
        version_or_tag.image,
        ar_requests.GetVersionFromTag(client, messages,
                                      version_or_tag.GetTagName()))
  try:
    ar_requests.GetTag(client, messages, tag.GetTagName())
  except api_exceptions.HttpNotFoundError:
    # Tag does not exist yet: create it.
    ar_requests.CreateDockerTag(client, messages, tag, docker_version)
  else:
    # Tag already exists: move it by deleting and recreating it.
    ar_requests.DeleteTag(client, messages, tag.GetTagName())
    ar_requests.CreateDockerTag(client, messages, tag, docker_version)
  log.status.Print("Added tag [{}] to image [{}].".format(
      tag.GetDockerString(), args.DOCKER_IMAGE))
def DeleteDockerTag(args):
  """Deletes a Docker tag after user confirmation."""
  image, docker_tag = _ParseDockerTag(args.DOCKER_TAG)
  ar_util.ValidateLocation(
      image.docker_repo.location, image.docker_repo.project)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  console_io.PromptContinue(
      message="You are about to delete tag [{}]".format(
          docker_tag.GetDockerString()),
      cancel_on_no=True)
  ar_requests.DeleteTag(ar_requests.GetClient(), ar_requests.GetMessages(),
                        docker_tag.GetTagName())
  log.status.Print("Deleted tag [{}].".format(docker_tag.GetDockerString()))
def ListDockerTags(args):
  """Lists Docker tags.

  Args:
    args: user input arguments; IMAGE_PATH may name a repository (tags of
      every image in it are listed) or a single image.

  Returns:
    A list of dicts with "tag", "image" and "version" keys.
  """
  resource = ParseDockerImagePath(args.IMAGE_PATH)
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  img_list = []
  if isinstance(resource, DockerRepo):
    _ValidateDockerRepo(resource.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.project, resource.location, resource.repo))
    # Repository path: collect every package (image) in the repository.
    for pkg in ar_requests.ListPackages(client, messages,
                                        resource.GetRepositoryName()):
      img_list.append(DockerImage(resource, pkg.name.split("/")[-1]))
  elif isinstance(resource, DockerImage):
    _ValidateDockerRepo(resource.docker_repo.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.docker_repo.project, resource.docker_repo.location,
            resource.docker_repo.repo))
    img_list.append(resource)
  tag_list = []
  for img in img_list:
    for tag in ar_requests.ListTags(client, messages, img.GetPackageName(),
                                    args.page_size):
      tag_list.append({
          "tag": tag.name,
          "image": img.GetDockerString(),
          "version": tag.version,
      })
  return tag_list
| 34.237726 | 95 | 0.701698 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.artifacts import exceptions as ar_exceptions
from googlecloudsdk.api_lib.util import common_args
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.command_lib.artifacts import containeranalysis_util as ca_util
from googlecloudsdk.command_lib.artifacts import requests as ar_requests
from googlecloudsdk.command_lib.artifacts import util as ar_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
ARTIFACTREGISTRY_API_NAME = "artifactregistry"
_INVALID_IMAGE_PATH_ERROR = """Invalid Docker string.
A valid Docker repository has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID
A valid image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
"""
_INVALID_DEFAULT_DOCKER_STRING_ERROR = (
"""Fail to construct Docker string from config values:
core/project: {project}, artifacts/location: {location}, artifacts/repository: {repo}
A valid Docker repository has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID
A valid image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
""")
_INVALID_IMAGE_ERROR = """Invalid Docker image.
A valid container image has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE
A valid container image that can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""
_INVALID_DOCKER_IMAGE_ERROR = """Invalid Docker image.
A valid container image can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""
_INVALID_DOCKER_TAG_ERROR = """Invalid Docker tag.
A valid Docker tag has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
"""
_DOCKER_IMAGE_NOT_FOUND = """Image not found.
A valid container image can be referenced by tag or digest, has the format of
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE:tag
LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY-ID/IMAGE@sha256:digest
"""
DOCKER_REPO_REGEX = (
r"^(?P<location>.*)-docker.pkg.dev\/(?P<project>[^\/]+)\/(?P<repo>[^\/]+)")
DOCKER_IMG_BY_TAG_REGEX = r"^.*-docker.pkg.dev\/[^\/]+\/[^\/]+\/(?P<img>.*):(?P<tag>.*)"
DOCKER_IMG_BY_DIGEST_REGEX = (
r"^.*-docker.pkg.dev\/[^\/]+\/[^\/]+\/(?P<img>.*)@(?P<digest>sha256:.*)")
DOCKER_IMG_REGEX = r"^.*-docker.pkg.dev\/[^\/]+\/[^\/]+\/(?P<img>.*)"
_VERSION_COLLECTION_NAME = "artifactregistry.projects.locations.repositories.packages.versions"
def _GetDefaultResources():
project = properties.VALUES.core.project.Get()
location = properties.VALUES.artifacts.location.Get()
repo = properties.VALUES.artifacts.repository.Get()
if not project or not location or not repo:
raise ar_exceptions.InvalidInputValueError(
_INVALID_DEFAULT_DOCKER_STRING_ERROR.format(**{
"project": project,
"location": location,
"repo": repo,
}))
ar_util.ValidateLocation(location, project)
return DockerRepo(project, location, repo)
def _ParseInput(input_str):
matches = re.match(DOCKER_REPO_REGEX, input_str)
if not matches:
raise ar_exceptions.InvalidInputValueError()
location = matches.group("location")
project_id = matches.group("project")
return DockerRepo(project_id, location, matches.group("repo"))
def ParseDockerImagePath(img_path):
if not img_path:
return _GetDefaultResources()
resource_val_list = list(filter(None, img_path.split("/")))
try:
docker_repo = _ParseInput(img_path)
except ar_exceptions.InvalidInputValueError:
raise ar_exceptions.InvalidInputValueError(_INVALID_IMAGE_PATH_ERROR)
ar_util.ValidateLocation(docker_repo.location, docker_repo.project)
if len(resource_val_list) == 3:
return docker_repo
elif len(resource_val_list) > 3:
return DockerImage(docker_repo, "/".join(resource_val_list[3:]))
raise ar_exceptions.InvalidInputValueError(_INVALID_IMAGE_PATH_ERROR)
def _ParseDockerImage(img_str, err_msg):
try:
docker_repo = _ParseInput(img_str)
except ar_exceptions.InvalidInputValueError:
raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
ar_util.ValidateLocation(docker_repo.location, docker_repo.project)
img_by_digest_match = re.match(DOCKER_IMG_BY_DIGEST_REGEX, img_str)
if img_by_digest_match:
docker_img = DockerImage(docker_repo, img_by_digest_match.group("img"))
return docker_img, DockerVersion(docker_img,
img_by_digest_match.group("digest"))
img_by_tag_match = re.match(DOCKER_IMG_BY_TAG_REGEX, img_str)
if img_by_tag_match:
docker_img = DockerImage(docker_repo, img_by_tag_match.group("img"))
return docker_img, DockerTag(docker_img, img_by_tag_match.group("tag"))
whole_img_match = re.match(DOCKER_IMG_REGEX, img_str)
if whole_img_match:
return DockerImage(docker_repo,
whole_img_match.group("img").strip("/")), None
raise ar_exceptions.InvalidInputValueError(err_msg)
def _ParseDockerTag(tag):
try:
docker_repo = _ParseInput(tag)
except ar_exceptions.InvalidInputValueError:
raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_TAG_ERROR)
img_by_tag_match = re.match(DOCKER_IMG_BY_TAG_REGEX, tag)
if img_by_tag_match:
docker_img = DockerImage(docker_repo, img_by_tag_match.group("img"))
return docker_img, DockerTag(docker_img, img_by_tag_match.group("tag"))
else:
raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_TAG_ERROR)
def _GetDockerPackagesAndVersions(docker_repo,
include_tags,
page_size,
order_by,
limit,
is_nested=False):
client = ar_requests.GetClient()
messages = ar_requests.GetMessages()
img_list = []
for pkg in ar_requests.ListPackages(
client, messages, docker_repo.GetRepositoryName(), page_size=page_size):
parts = pkg.name.split("/")
if len(parts) != 8:
raise ar_exceptions.ArtifactRegistryError(
"Internal error. Corrupted package name: {}".format(pkg.name))
img = DockerImage(DockerRepo(parts[1], parts[3], parts[5]), parts[7])
img_list.extend(_GetDockerVersions(img, include_tags,
page_size, order_by, limit, is_nested))
return img_list
def _GetDockerNestedVersions(docker_img,
include_tags,
page_size,
order_by,
limit,
is_nested=False):
prefix = docker_img.GetDockerString() + "/"
all_versions = _GetDockerPackagesAndVersions(
docker_img.docker_repo, include_tags,
page_size, order_by, limit, is_nested)
return [
ver for ver in all_versions
if ver["package"].startswith(prefix)
]
def _GetDockerVersions(docker_img,
                       include_tags,
                       page_size=None,
                       order_by=None,
                       limit=None,
                       is_nested=False):
  """Lists the versions of a Docker image.

  Args:
    docker_img: DockerImage, the image whose versions are listed.
    include_tags: bool, if True the FULL version view is requested so each
      version carries its related tags; otherwise BASIC is used.
    page_size: int, page size to request from the API.
    order_by: str, field for the API to sort by.
    limit: int, maximum number of versions to return.
    is_nested: bool, True when this call is already resolving a nested image
      path; prevents infinite recursion through _GetDockerNestedVersions.

  Returns:
    A list of dicts with keys package, tags, version, createTime and
    updateTime, one entry per version.
  """
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  ver_view = (
      messages
      .ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest
      .ViewValueValuesEnum.BASIC)
  if include_tags:
    ver_view = (
        messages.
        ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest
        .ViewValueValuesEnum.FULL)
  ver_list = ar_requests.ListVersions(client, messages,
                                      docker_img.GetPackageName(), ver_view,
                                      page_size, order_by, limit)
  # No versions can mean the given path is a prefix of a nested image.
  # E.g. us-west1-docker.pkg.dev/fake-project/docker-repo/nested1 in
  # us-west1-docker.pkg.dev/fake-project/docker-repo/nested1/nested2/test-image
  # Try to get the list of versions through the list of all packages.
  if not ver_list and not is_nested:
    return _GetDockerNestedVersions(
        docker_img, include_tags, page_size, order_by, limit, is_nested=True)
  img_list = []
  for ver in ver_list:
    # Extract the short version id (the last resource-name segment) from the
    # fully qualified version resource name.
    v = resources.REGISTRY.Parse(
        ver.name, collection=_VERSION_COLLECTION_NAME).Name()
    img_list.append({
        "package": docker_img.GetDockerString(),
        "tags": ", ".join([tag.name.split("/")[-1] for tag in ver.relatedTags]),
        "version": v,
        "createTime": ver.createTime,
        "updateTime": ver.updateTime
    })
  return img_list
def _LogResourcesToDelete(docker_version, docker_tags):
  """Prints the digest and the tags that are about to be deleted."""
  log.status.Print("Digests:\n- " + docker_version.GetDockerString())
  if not docker_tags:
    return
  log.status.Print("\nTags:")
  for docker_tag in docker_tags:
    log.status.Print("- " + docker_tag.GetDockerString())
def _GetDockerVersionTags(client, messages, docker_version):
  """Returns the DockerTags currently attached to the given version."""
  tag_resources = ar_requests.ListVersionTags(client, messages,
                                              docker_version.GetPackageName(),
                                              docker_version.GetVersionName())
  result = []
  for tag_resource in tag_resources:
    tag_id = tag_resource.name.split("/")[-1]
    result.append(DockerTag(docker_version.image, tag_id))
  return result
def _ValidateDockerRepo(repo_name):
  """Raises InvalidInputValueError unless repo_name is a DOCKER repository."""
  repo = ar_requests.GetRepository(repo_name)
  messages = ar_requests.GetMessages()
  docker_format = messages.Repository.FormatValueValuesEnum.DOCKER
  if repo.format == docker_format:
    return
  raise ar_exceptions.InvalidInputValueError(
      "Invalid repository type {}. The `artifacts docker` command group can "
      "only be used on Docker repositories.".format(repo.format))
def _ValidateAndGetDockerVersion(version_or_tag):
  """Resolves a version or tag into a verified DockerVersion.

  Args:
    version_or_tag: a DockerVersion or DockerTag object.

  Returns:
    A DockerVersion whose existence has been confirmed against the API.

  Raises:
    ar_exceptions.InvalidInputValueError: If the input is neither a
      DockerVersion nor a DockerTag, or the referenced resource does not
      exist.
  """
  try:
    if isinstance(version_or_tag, DockerVersion):
      # We have all the information about the docker digest.
      # Call the API to make sure it exists.
      ar_requests.GetVersion(ar_requests.GetClient(), ar_requests.GetMessages(),
                             version_or_tag.GetVersionName())
      return version_or_tag
    elif isinstance(version_or_tag, DockerTag):
      # Resolve the tag to its underlying digest via the API.
      digest = ar_requests.GetVersionFromTag(ar_requests.GetClient(),
                                             ar_requests.GetMessages(),
                                             version_or_tag.GetTagName())
      docker_version = DockerVersion(version_or_tag.image, digest)
      return docker_version
    else:
      raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
  except api_exceptions.HttpNotFoundError:
    raise ar_exceptions.InvalidInputValueError(_DOCKER_IMAGE_NOT_FOUND)
class DockerRepo(object):
  """Holder for a Docker repository: project, location and repository ids."""

  def __init__(self, project_id, location_id, repo_id):
    self._project = project_id
    self._location = location_id
    self._repo = repo_id

  @property
  def project(self):
    return self._project

  @property
  def location(self):
    return self._location

  @property
  def repo(self):
    return self._repo

  def __eq__(self, other):
    if not isinstance(other, DockerRepo):
      return NotImplemented
    return (self._project, self._location, self._repo) == (
        other._project, other._location, other._repo)

  def GetDockerString(self):
    """Returns the registry address, e.g. us-docker.pkg.dev/my-proj/my-repo."""
    return "{}-docker.pkg.dev/{}/{}".format(self.location, self.project,
                                            self.repo)

  def GetRepositoryName(self):
    """Returns the full Artifact Registry resource name of the repository."""
    return "projects/{}/locations/{}/repositories/{}".format(
        self.project, self.location, self.repo)
class DockerImage(object):
  """Holder for a Docker image: a package inside a Docker repository."""

  def __init__(self, docker_repo, pkg_id):
    self._docker_repo = docker_repo
    self._pkg = pkg_id

  @property
  def project(self):
    return self._docker_repo.project

  @property
  def docker_repo(self):
    return self._docker_repo

  @property
  def pkg(self):
    return self._pkg

  def __eq__(self, other):
    if not isinstance(other, DockerImage):
      return NotImplemented
    return (self._docker_repo, self._pkg) == (other._docker_repo, other._pkg)

  def GetPackageName(self):
    """Returns the AR resource name; slashes in the image path are escaped."""
    return "{}/packages/{}".format(self.docker_repo.GetRepositoryName(),
                                   self.pkg.replace("/", "%2F"))

  def GetDockerString(self):
    """Returns the docker-style image path with escaped slashes restored."""
    return "{}-docker.pkg.dev/{}/{}/{}".format(
        self.docker_repo.location,
        self.docker_repo.project,
        self.docker_repo.repo,
        self.pkg.replace("%2F", "/"))
class DockerTag(object):
  """Holder for a Docker tag attached to an image."""

  def __init__(self, docker_img, tag_id):
    self._image = docker_img
    self._tag = tag_id

  @property
  def image(self):
    return self._image

  @property
  def tag(self):
    return self._tag

  def __eq__(self, other):
    if not isinstance(other, DockerTag):
      return NotImplemented
    return (self._image, self._tag) == (other._image, other._tag)

  def GetTagName(self):
    """Returns the full Artifact Registry resource name of the tag."""
    return "{}/tags/{}".format(self.image.GetPackageName(), self.tag)

  def GetPackageName(self):
    """Returns the Artifact Registry resource name of the owning package."""
    return self.image.GetPackageName()

  def GetDockerString(self):
    """Returns the docker pull string, e.g. <image-path>:<tag>."""
    return "{}:{}".format(self.image.GetDockerString(), self.tag)
class DockerVersion(object):
  """Holder for a Docker version: an image plus its digest."""

  def __init__(self, docker_img, digest):
    self._image = docker_img
    self._digest = digest

  @property
  def image(self):
    return self._image

  @property
  def digest(self):
    return self._digest

  @property
  def project(self):
    return self._image.docker_repo.project

  def __eq__(self, other):
    if not isinstance(other, DockerVersion):
      return NotImplemented
    return (self._image, self._digest) == (other._image, other._digest)

  def GetVersionName(self):
    """Returns the full Artifact Registry resource name of the version."""
    return "{}/versions/{}".format(self.image.GetPackageName(), self.digest)

  def GetPackageName(self):
    """Returns the Artifact Registry resource name of the owning package."""
    return self.image.GetPackageName()

  def GetDockerString(self):
    """Returns the docker pull string, e.g. <image-path>@sha256:<hash>."""
    return "{}@{}".format(self.image.GetDockerString(), self.digest)
def GetDockerImages(resource, args):
  """Lists images/versions under a repository or a single image.

  Args:
    resource: DockerRepo or DockerImage to list under.
    args: argparse namespace with include_tags, page_size, limit, filter
      and sort_by.

  Returns:
    A list of version dicts (see _GetDockerVersions), or [] when resource
    is neither a DockerRepo nor a DockerImage.
  """
  limit = args.limit
  # If filter is set, we leave limiting to gcloud SDK.
  if args.filter is not None:
    limit = None
  order_by = common_args.ParseSortByArg(args.sort_by)
  # Multi-ordering is not supported yet on backend.
  if order_by is not None:
    if "," in order_by:
      order_by = None
      limit = None
  if isinstance(resource, DockerRepo):
    _ValidateDockerRepo(resource.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.project, resource.location, resource.repo))
    return _GetDockerPackagesAndVersions(resource, args.include_tags,
                                         args.page_size, order_by, limit)
  elif isinstance(resource, DockerImage):
    _ValidateDockerRepo(resource.docker_repo.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.docker_repo.project, resource.docker_repo.location,
            resource.docker_repo.repo))
    return _GetDockerVersions(resource, args.include_tags,
                              args.page_size, order_by, limit)
  return []
def WaitForOperation(operation, message):
  """Blocks until the given Artifact Registry operation completes.

  Args:
    operation: the Operation message returned by the API.
    message: str, progress message displayed while waiting.
  """
  op_resource = resources.REGISTRY.ParseRelativeName(
      operation.name,
      collection="artifactregistry.projects.locations.operations")
  op_service = ar_requests.GetClient().projects_locations_operations
  poller = waiter.CloudOperationPollerNoResources(op_service)
  waiter.WaitFor(poller, op_resource, message)
def DescribeDockerImage(args):
  """Describes a container image, including Container Analysis metadata.

  Args:
    args: argparse namespace with IMAGE plus metadata-related flags
      consumed by ca_util.

  Returns:
    A dict with an "image_summary" section plus the Container Analysis
    describe view.

  Raises:
    ar_exceptions.InvalidInputValueError: If IMAGE cannot be parsed or the
      referenced resource does not exist.
  """
  image, version_or_tag = _ParseDockerImage(args.IMAGE, _INVALID_IMAGE_ERROR)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  docker_version = _ValidateAndGetDockerVersion(version_or_tag)
  result = {}
  result["image_summary"] = {
      "digest":
          docker_version.digest,
      "fully_qualified_digest":
          docker_version.GetDockerString(),
      "registry":
          "{}-docker.pkg.dev".format(docker_version.image.docker_repo.location),
      "repository":
          docker_version.image.docker_repo.repo,
  }
  # Merge in vulnerability/provenance metadata from Container Analysis.
  metadata = ca_util.GetContainerAnalysisMetadata(docker_version, args)
  result.update(metadata.ImagesDescribeView())
  return result
def DeleteDockerImage(args):
  """Deletes a docker package, or a single digest and its tags.

  When no version/tag is present in IMAGE, the whole package (all tags and
  digests) is deleted after confirmation. Otherwise the input is resolved to
  a digest; unless --delete-tags is passed, deletion is refused while other
  tags still point at that digest.

  Args:
    args: argparse namespace with IMAGE and delete_tags.

  Returns:
    The API Operation message for the delete call.

  Raises:
    ar_exceptions.InvalidInputValueError: If IMAGE cannot be parsed.
    ar_exceptions.ArtifactRegistryError: If the image is tagged and
      --delete-tags was not specified.
  """
  image, version_or_tag = _ParseDockerImage(args.IMAGE, _INVALID_IMAGE_ERROR)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  if not version_or_tag:
    console_io.PromptContinue(
        message="\nThis operation will delete all tags and images for " +
        image.GetDockerString() + ".",
        cancel_on_no=True)
    return ar_requests.DeletePackage(client, messages, image.GetPackageName())
  else:
    provided_tags = []
    docker_version = version_or_tag
    if isinstance(version_or_tag, DockerTag):
      # Resolve the tag to its digest before deleting.
      docker_version = DockerVersion(
          version_or_tag.image,
          ar_requests.GetVersionFromTag(client, messages,
                                        version_or_tag.GetTagName()))
      provided_tags.append(version_or_tag)
    existing_tags = _GetDockerVersionTags(client, messages, docker_version)
    if not args.delete_tags and existing_tags != provided_tags:
      raise ar_exceptions.ArtifactRegistryError(
          "Cannot delete image {} because it is tagged. "
          "Existing tags are:\n- {}".format(
              args.IMAGE,
              "\n- ".join(tag.GetDockerString() for tag in existing_tags)))
    _LogResourcesToDelete(docker_version, existing_tags)
    console_io.PromptContinue(
        message="\nThis operation will delete the above resources.",
        cancel_on_no=True)
    # Tags must be removed before their version can be deleted.
    for tag in existing_tags:
      ar_requests.DeleteTag(client, messages, tag.GetTagName())
    return ar_requests.DeleteVersion(client, messages,
                                     docker_version.GetVersionName())
def GetDockerImage(image_url):
  """Fetches the Artifact Registry package for a docker image URL.

  Args:
    image_url: str, a docker-style image path.

  Returns:
    The Package message for the image.
  """
  image, _unused_version = _ParseDockerImage(image_url, _INVALID_IMAGE_ERROR)
  _ValidateDockerRepo(image.docker_repo.GetRepositoryName())
  return ar_requests.GetPackage(image.GetPackageName())
def AddDockerTag(args):
  """Adds a tag to an existing docker image digest.

  The source image must include a digest or an existing tag, and the target
  tag must live under the same package. If the tag already exists it is
  re-pointed (delete + create) at the resolved digest.

  Args:
    args: argparse namespace with DOCKER_IMAGE and DOCKER_TAG.

  Raises:
    ar_exceptions.InvalidInputValueError: If either string cannot be parsed,
      no version/tag is present on the source image, or the source and
      target refer to different packages.
  """
  src_image, version_or_tag = _ParseDockerImage(args.DOCKER_IMAGE,
                                                _INVALID_DOCKER_IMAGE_ERROR)
  if version_or_tag is None:
    raise ar_exceptions.InvalidInputValueError(_INVALID_DOCKER_IMAGE_ERROR)
  dest_image, tag = _ParseDockerTag(args.DOCKER_TAG)
  if src_image.GetPackageName() != dest_image.GetPackageName():
    raise ar_exceptions.InvalidInputValueError(
        "Image {}\ndoes not match image {}".format(
            src_image.GetDockerString(), dest_image.GetDockerString()))
  _ValidateDockerRepo(src_image.docker_repo.GetRepositoryName())
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  docker_version = version_or_tag
  if isinstance(version_or_tag, DockerTag):
    # Resolve the source tag to its digest.
    docker_version = DockerVersion(
        version_or_tag.image,
        ar_requests.GetVersionFromTag(client, messages,
                                      version_or_tag.GetTagName()))
  try:
    ar_requests.GetTag(client, messages, tag.GetTagName())
  except api_exceptions.HttpNotFoundError:
    # Tag does not exist yet; create it.
    ar_requests.CreateDockerTag(client, messages, tag, docker_version)
  else:
    # Tag exists; re-point it by deleting and re-creating.
    ar_requests.DeleteTag(client, messages, tag.GetTagName())
    ar_requests.CreateDockerTag(client, messages, tag, docker_version)
  log.status.Print("Added tag [{}] to image [{}].".format(
      tag.GetDockerString(), args.DOCKER_IMAGE))
def DeleteDockerTag(args):
  """Deletes an existing docker tag after interactive confirmation.

  Args:
    args: argparse namespace with DOCKER_TAG.
  """
  img, tag = _ParseDockerTag(args.DOCKER_TAG)
  ar_util.ValidateLocation(img.docker_repo.location, img.docker_repo.project)
  _ValidateDockerRepo(img.docker_repo.GetRepositoryName())
  console_io.PromptContinue(
      message="You are about to delete tag [{}]".format(tag.GetDockerString()),
      cancel_on_no=True)
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  ar_requests.DeleteTag(client, messages, tag.GetTagName())
  log.status.Print("Deleted tag [{}].".format(tag.GetDockerString()))
def ListDockerTags(args):
  """Lists docker tags under an image path or a whole repository.

  Args:
    args: argparse namespace with IMAGE_PATH and page_size.

  Returns:
    A list of dicts with keys tag, image and version.
  """
  resource = ParseDockerImagePath(args.IMAGE_PATH)
  client = ar_requests.GetClient()
  messages = ar_requests.GetMessages()
  img_list = []
  if isinstance(resource, DockerRepo):
    # Repository path: collect every image in the repository.
    _ValidateDockerRepo(resource.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.project, resource.location, resource.repo))
    for pkg in ar_requests.ListPackages(client, messages,
                                        resource.GetRepositoryName()):
      img_list.append(DockerImage(resource, pkg.name.split("/")[-1]))
  elif isinstance(resource, DockerImage):
    # Single image path: only list tags for this image.
    _ValidateDockerRepo(resource.docker_repo.GetRepositoryName())
    log.status.Print(
        "Listing items under project {}, location {}, repository {}.\n".format(
            resource.docker_repo.project, resource.docker_repo.location,
            resource.docker_repo.repo))
    img_list.append(resource)
  tag_list = []
  for img in img_list:
    for tag in ar_requests.ListTags(client, messages, img.GetPackageName(),
                                    args.page_size):
      tag_list.append({
          "tag": tag.name,
          "image": img.GetDockerString(),
          "version": tag.version,
      })
  return tag_list
| true | true |
f732e030f11d4a52981ec1e7bf19a8c4446b2ab8 | 127 | py | Python | java/spark/python/whyspark/udt/__init__.py | cswarth/whylogs | 6805b252f1d07efde84836d3924949f7ec2d97b1 | [
"Apache-2.0"
] | 603 | 2020-07-31T23:26:10.000Z | 2022-03-31T23:05:36.000Z | java/spark/python/whyspark/udt/__init__.py | cswarth/whylogs | 6805b252f1d07efde84836d3924949f7ec2d97b1 | [
"Apache-2.0"
] | 284 | 2021-03-02T21:28:03.000Z | 2022-03-31T22:36:08.000Z | java/spark/python/whyspark/udt/__init__.py | jamie256/whylogs | e4b8288a61c00fbe033c0248a015e6e91ee6c8b0 | [
"Apache-2.0"
] | 39 | 2020-08-14T21:22:08.000Z | 2022-03-29T20:24:54.000Z | from .profile import WhyProfileSession, new_profiling_session
__ALL__ = [
WhyProfileSession,
new_profiling_session,
]
| 18.142857 | 61 | 0.787402 | from .profile import WhyProfileSession, new_profiling_session
__ALL__ = [
WhyProfileSession,
new_profiling_session,
]
| true | true |
f732e0afc2f03a5d0b1ff38a52ac0c46ceaeea7f | 1,477 | py | Python | Projects/Hongbog/EyeVerification_v2/native/constant.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Projects/Hongbog/EyeVerification_v2/native/constant.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Projects/Hongbog/EyeVerification_v2/native/constant.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | import tensorflow as tf
flags = tf.app.flags
'''학습 데이터 경로'''
flags.DEFINE_string('train_data_path',
'G:/04_dataset/eye_verification/pair_eye/train',
'눈 학습 데이터 경로')
flags.DEFINE_string('test_data_path',
'G:/04_dataset/eye_verification/pair_eye/test',
'눈 테스트 데이터 경로')
'''학습 로그 경로'''
flags.DEFINE_string('trained_weight_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/train_log/001',
'훈련된 가중치 값 저장 경로')
flags.DEFINE_string('tensorboard_log_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/tensorboard_log/001',
'텐서보드에서 모니터링 변수 저장 경로')
flags.DEFINE_string('deploy_log_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/deploy_log/001',
'Model Deploy 시 사용될 체크포인트 파일 저장 경로')
'''하이퍼 파라미터'''
flags.DEFINE_integer('epochs',
50,
'훈련 시 총 에폭 수')
flags.DEFINE_integer('batch_size',
10,
'훈련 시 배치 크기')
flags.DEFINE_float('dropout_rate',
0.4,
'신경망 dropout 비율')
flags.DEFINE_float('learning_rate',
0.001,
'신경망 learning 비율')
flags.DEFINE_float('regularization_scale',
0.0005,
'신경망 L2 regularization 크기') | 32.108696 | 112 | 0.560596 | import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('train_data_path',
'G:/04_dataset/eye_verification/pair_eye/train',
'눈 학습 데이터 경로')
flags.DEFINE_string('test_data_path',
'G:/04_dataset/eye_verification/pair_eye/test',
'눈 테스트 데이터 경로')
flags.DEFINE_string('trained_weight_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/train_log/001',
'훈련된 가중치 값 저장 경로')
flags.DEFINE_string('tensorboard_log_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/tensorboard_log/001',
'텐서보드에서 모니터링 변수 저장 경로')
flags.DEFINE_string('deploy_log_dir',
'D:/Source/PythonRepository/Projects/Hongbog/EyeVerification_v2/native/deploy_log/001',
'Model Deploy 시 사용될 체크포인트 파일 저장 경로')
flags.DEFINE_integer('epochs',
50,
'훈련 시 총 에폭 수')
flags.DEFINE_integer('batch_size',
10,
'훈련 시 배치 크기')
flags.DEFINE_float('dropout_rate',
0.4,
'신경망 dropout 비율')
flags.DEFINE_float('learning_rate',
0.001,
'신경망 learning 비율')
flags.DEFINE_float('regularization_scale',
0.0005,
'신경망 L2 regularization 크기') | true | true |
f732e2af65f2973557c266ea96c1f221e8325720 | 64 | py | Python | tfc_web/authmultitoken/__init__.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 2 | 2018-10-28T20:15:23.000Z | 2019-03-29T09:06:09.000Z | tfc_web/authmultitoken/__init__.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 107 | 2018-10-22T06:57:07.000Z | 2020-09-15T14:43:03.000Z | tfc_web/authmultitoken/__init__.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 1 | 2020-03-20T19:49:29.000Z | 2020-03-20T19:49:29.000Z | default_app_config = 'authmultitoken.apps.AuthMultiTokenConfig'
| 32 | 63 | 0.875 | default_app_config = 'authmultitoken.apps.AuthMultiTokenConfig'
| true | true |
f732e2ef2581e1694ecea051a931a38d7b2a6e81 | 46,858 | py | Python | exarl/candlelib/uq_utils.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 2 | 2022-02-03T20:33:17.000Z | 2022-02-10T22:43:32.000Z | exarl/candlelib/uq_utils.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 40 | 2022-01-25T18:03:12.000Z | 2022-03-31T21:43:32.000Z | exarl/candlelib/uq_utils.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T14:33:30.000Z | 2022-02-10T14:33:30.000Z | from __future__ import absolute_import
import numpy as np
from scipy.stats import pearsonr, spearmanr
from scipy import signal
from scipy.interpolate import InterpolatedUnivariateSpline
def generate_index_distribution(numTrain, numTest, numValidation, params):
    """ Dispatches to the index-partitioning strategy selected by params.

    The strategy is picked by which complete keyword triplet is present in
    params, checked in this order: fractions (uq_train_fr, uq_valid_fr,
    uq_test_fr), explicit block lists (uq_train_vec, uq_valid_vec,
    uq_test_vec), then block counts (uq_train_bks, uq_valid_bks,
    uq_test_bks). No consistency checking of the partition itself is done.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dict
        Keyword parameters controlling the partition (see above).

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array
        Indices for data in validation (if any)
    indexTest : int numpy array
        Indices for data in testing (if merging)
    """
    dispatch = (
        (('uq_train_fr', 'uq_valid_fr', 'uq_test_fr'),
         'FRACTION', generate_index_distribution_from_fraction),
        (('uq_train_vec', 'uq_valid_vec', 'uq_test_vec'),
         'BLOCK LIST', generate_index_distribution_from_block_list),
        (('uq_train_bks', 'uq_valid_bks', 'uq_test_bks'),
         'BLOCK NUMBER', generate_index_distribution_from_blocks),
    )
    for keys, label, builder in dispatch:
        if all(k in params for k in keys):
            print("Computing UQ cross-validation - Distributing by " + label)
            return builder(numTrain, numTest, numValidation, params)
    print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
    raise KeyError("No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)")
def generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params):
    """ Partitions data indices into train/validation/test by fractions.

    Each fraction must lie in [0, 1] and the three must add up to 1 (within
    a small tolerance). When the test fraction is positive, the test data is
    pooled with the rest and all three partitions are drawn fresh; when it
    is zero, the existing test partition is preserved.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dict
        Must contain uq_train_fr, uq_valid_fr and uq_test_fr.

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array or None
        Indices for data in validation (None when the fraction is zero)
    indexTest : int numpy array or None
        Indices for data in testing (None when the fraction is zero)
    """
    tol = 1e-7

    frac_train = params['uq_train_fr']
    frac_valid = params['uq_valid_fr']
    frac_test = params['uq_test_fr']

    if not (0. <= frac_train <= 1.):
        raise ValueError('uq_train_fr is not in (0, 1) range. uq_train_fr: ', frac_train)
    if not (0. <= frac_valid <= 1.):
        raise ValueError('uq_valid_fr is not in (0, 1) range. uq_valid_fr: ', frac_valid)
    if not (0. <= frac_test <= 1.):
        raise ValueError('uq_test_fr is not in (0, 1) range. uq_test_fr: ', frac_test)

    total_fraction = frac_train + frac_valid + frac_test
    if abs(total_fraction - 1.) > tol:
        raise ValueError(
            'Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:',
            total_fraction)

    # Pool the test data only when a fresh test partition is requested.
    numData = numTrain + numValidation
    if frac_test > 0:
        numData += numTest

    sizeTraining = int(np.round(numData * frac_train))
    sizeValidation = int(np.round(numData * frac_valid))

    shuffled = np.random.permutation(numData)

    indexTrain = shuffled[:sizeTraining]
    indexValidation = None
    if frac_valid > 0:
        indexValidation = shuffled[sizeTraining:sizeTraining + sizeValidation]
    indexTest = None
    if frac_test > 0:
        indexTest = shuffled[sizeTraining + sizeValidation:]

    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):
    """ Partitions data indices into train/validation/test by block counts.

    The data is split into (uq_train_bks + uq_valid_bks + uq_test_bks)
    equally sized blocks and each partition receives a contiguous run of
    shuffled indices sized by its block count. No consistency checking of
    the requested counts is performed.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dict
        Must contain uq_train_bks, uq_valid_bks and uq_test_bks.

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array or None
        Indices for data in validation (None when no blocks requested)
    indexTest : int numpy array or None
        Indices for data in testing (None when no blocks requested)
    """
    numBlocksTrain = params['uq_train_bks']
    numBlocksValidation = params['uq_valid_bks']
    numBlocksTest = params['uq_test_bks']
    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest

    # Pool the test data only when test blocks are requested; otherwise the
    # existing test partition is preserved.
    numData = numTrain + numValidation
    if numBlocksTest > 0:
        numData += numTest

    # Integer division rounded to nearest.
    blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal
    if numData != blockSize * numBlocksTotal:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Testing (if specified) or Validation (if specified) will use different block size.")

    trainEnd = numBlocksTrain * blockSize
    validEnd = trainEnd + numBlocksValidation * blockSize

    shuffled = np.arange(numData)
    np.random.shuffle(shuffled)

    indexTrain = shuffled[:trainEnd]
    indexValidation = None
    if numBlocksValidation > 0:
        indexValidation = shuffled[trainEnd:validEnd]
    indexTest = None
    if numBlocksTest > 0:
        indexTest = shuffled[validEnd:]

    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params):
    """ Partitions data indices into train/validation/test by block lists.

    The block indices to use for each partition are taken verbatim from
    params (uq_train_vec, uq_valid_vec, uq_test_vec). No checking is done:
    the lists are assumed to describe a coherent partition of the data.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dict
        Must contain uq_train_vec, uq_valid_vec and uq_test_vec, each a
        list of block indices.

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array or None
        Indices for data in validation (None when the block list is empty)
    indexTest : int numpy array or None
        Indices for data in testing (None when the block list is empty)
    """
    blocksTrain = params['uq_train_vec']
    blocksValidation = params['uq_valid_vec']
    blocksTest = params['uq_test_vec']

    numBlocksTrain = len(blocksTrain)
    numBlocksValidation = len(blocksValidation)
    numBlocksTest = len(blocksTest)
    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest

    # Pool the test data only when test blocks are requested; otherwise the
    # existing test partition is preserved.
    numData = numTrain + numValidation
    if numBlocksTest > 0:
        numData += numTest

    blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal  # integer division with rounding
    remainder = numData - blockSize * numBlocksTotal
    if remainder != 0:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Last block will have different size.")
    remainder = max(remainder, 0)

    # Whichever partition owns the last block absorbs the leftover chunk,
    # so each array is sized for its blocks plus the remainder.
    indexTrain = fill_array(blocksTrain, blockSize * numBlocksTrain + remainder,
                            numData, numBlocksTotal, blockSize)

    indexValidation = None
    if numBlocksValidation > 0:
        indexValidation = fill_array(blocksValidation,
                                     blockSize * numBlocksValidation + remainder,
                                     numData, numBlocksTotal, blockSize)

    indexTest = None
    if numBlocksTest > 0:
        indexTest = fill_array(blocksTest,
                               blockSize * numBlocksTest + remainder,
                               numData, numBlocksTotal, blockSize)

    return indexTrain, indexValidation, indexTest
def compute_limits(numdata, numblocks, blocksize, blockn):
    """ Generates the limit of indices corresponding to a
        specific block. It takes into account the non-exact
        divisibility of numdata into numblocks, letting the
        last block take the extra chunk.

        Parameters
        ----------
        numdata : int
            Total number of data points to distribute
        numblocks : int
            Total number of blocks to distribute into
        blocksize : int
            Size of data per block
        blockn : int
            Index of block, from 0 to numblocks-1

        Return
        ----------
        start : int
            Position to start assigning indices
        end : int
            One beyond position to stop assigning indices
    """
    start = blockn * blocksize
    end = start + blocksize
    if blockn == (numblocks - 1):  # last block gets the extra chunk
        end = numdata
    return start, end


def fill_array(blocklist, maxsize, numdata, numblocks, blocksize):
    """ Fills a new array of integers with the indices corresponding
        to the specified block structure.

        Parameters
        ----------
        blocklist : list
            List of integers describing the block indices that
            go into the array
        maxsize : int
            Maximum possible length for the partition (the size of the
            common block size plus the remainder, if any).
        numdata : int
            Total number of data points to distribute
        numblocks : int
            Total number of blocks to distribute into
        blocksize : int
            Size of data per block

        Return
        ----------
        indexArray : int numpy array
            Indices for the specific data partition, trimmed
            to the number of indices actually filled.
    """
    # Use the builtin ``int`` dtype: the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    indexArray = np.zeros(maxsize, int)
    offset = 0
    for i in blocklist:
        start, end = compute_limits(numdata, numblocks, blocksize, i)
        length = end - start
        indexArray[offset:offset + length] = np.arange(start, end)
        offset += length
    # Only the first ``offset`` slots were filled; drop the padding.
    return indexArray[:offset]
# UTILS for COMPUTATION OF EMPIRICAL CALIBRATION
def compute_statistics_homoscedastic(df_data,
                                     col_true=0,
                                     col_pred=6,
                                     col_std_pred=7,
                                     ):
    """ Extracts ground truth, mean prediction, error and
        standard deviation of prediction from an inference
        data frame that already contains statistics over all
        inference realizations.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current CANDLE inference
            experiments (usually named <model>_pred.tsv).
        col_true : integer
            Column index of the true (observed) value
            (Default: 0, current CANDLE format).
        col_pred : integer
            Column index of the predicted value
            (Default: 6, current CANDLE format).
        col_std_pred : integer
            Column index of the standard deviation of the
            predictions (Default: 7, current CANDLE format).

        Return
        ----------
        Ytrue : numpy array
            True (observed) values.
        Ypred : numpy array
            Predicted values.
        yerror : numpy array
            Errors (observed - predicted).
        sigma : numpy array
            Standard deviations; for homoscedastic inference this is the
            std computed from the predictions (same as Ypred_std).
        Ypred_std : numpy array
            Standard deviations from regular (homoscedastic) inference.
        pred_name : string
            Name of the predicted quantity (column name at col_true).
    """
    Ytrue = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', Ytrue.shape)
    pred_name = df_data.columns[col_true]

    Ypred = df_data.iloc[:, col_pred].values
    print('Ypred shape: ', Ypred.shape)

    Ypred_std = df_data.iloc[:, col_std_pred].values
    print('Ypred_std shape: ', Ypred_std.shape)

    yerror = Ytrue - Ypred
    print('yerror shape: ', yerror.shape)

    sigma = Ypred_std  # homoscedastic: learned std == computed std

    squared_error = (Ytrue - Ypred)**2
    print('MSE: ', np.mean(squared_error))
    print('MSE_STD: ', np.std(squared_error))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    spearman_cc, pval = spearmanr(Ytrue, Ypred)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))

    return Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name
def compute_statistics_homoscedastic_all(df_data,
                                         col_true=4,
                                         col_pred_start=6
                                         ):
    """ Extracts ground truth, mean prediction, error and
        standard deviation of prediction from an inference
        data frame containing all individual inference
        realizations.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current CANDLE inference
            experiments (usually named <model>.predicted_INFER.tsv).
        col_true : integer
            Column index of the true (observed) value
            (Default: 4, current HOM format).
        col_pred_start : integer
            Column index of the first predicted value; every column from
            here on is an inference realization (Default: 6, current HOM
            format).

        Return
        ----------
        Ytrue : numpy array
            True (observed) values.
        Ypred_mean : numpy array
            Mean of the predicted values over all realizations.
        yerror : numpy array
            Errors (observed - predicted mean).
        sigma : numpy array
            Standard deviations; for homoscedastic inference this is the
            std computed from the predictions (same as Ypred_std).
        Ypred_std : numpy array
            Standard deviations over all realizations.
        pred_name : string
            Name of the predicted quantity (column name at col_true).
    """
    Ytrue = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', Ytrue.shape)
    pred_name = df_data.columns[col_true]

    realizations = df_data.iloc[:, col_pred_start:]
    # Keep np.mean / np.std (population std, ddof=0) to match the
    # statistics computed by the rest of the pipeline.
    Ypred_mean = np.mean(realizations, axis=1).values
    print('Ypred_mean shape: ', Ypred_mean.shape)
    Ypred_std = np.std(realizations, axis=1).values
    print('Ypred_std shape: ', Ypred_std.shape)

    yerror = Ytrue - Ypred_mean
    print('yerror shape: ', yerror.shape)

    sigma = Ypred_std  # homoscedastic: learned std == computed std

    squared_error = (Ytrue - Ypred_mean)**2
    print('MSE: ', np.mean(squared_error))
    print('MSE_STD: ', np.std(squared_error))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))

    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_heteroscedastic(df_data,
                                       col_true=4,
                                       col_pred_start=6,
                                       col_std_pred_start=7,
                                       ):
    """ Compute inference statistics for a heteroscedastic data frame
        storing every individual inference realization.

        Predictions and learned std-related outputs are interleaved
        with step 2: predictions at col_pred_start, col_pred_start+2, ...
        and the learned values at col_std_pred_start, col_std_pred_start+2, ...
        The learned values are averaged, exponentiated (i.e. interpreted
        as a log-variance) and square-rooted to obtain sigma.

        Parameters
        ----------
        df_data : pandas data frame
            Inference results in the current CANDLE heteroscedastic
            format (usually <model>.predicted_INFER_HET.tsv).
        col_true : integer
            Column index of the true (observed) values (Default: 4).
        col_pred_start : integer
            Column index of the first prediction realization
            (Default: 6, step 2).
        col_std_pred_start : integer
            Column index of the first learned-std realization
            (Default: 7, step 2).

        Return
        ----------
        Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name where Ypred
        is the mean over prediction realizations, yerror is
        (observed - predicted), sigma is the std derived from the
        learned outputs, and Ypred_std is the realization spread.
    """
    observed = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', observed.shape)
    quantity_name = df_data.columns[col_true]
    # Prediction realizations live in every other column starting here.
    predicted = np.mean(df_data.iloc[:, col_pred_start::2], axis=1).values
    print('Ypred shape: ', predicted.shape)
    spread = np.std(df_data.iloc[:, col_pred_start::2], axis=1).values
    print('Ypred_std shape: ', spread.shape)
    residual = observed - predicted
    print('yerror shape: ', residual.shape)
    # Learned outputs: average, then exp -> variance, then sqrt -> std.
    mean_learned = np.mean(df_data.iloc[:, col_std_pred_start::2], axis=1)
    learned_sigma = np.sqrt(np.exp(mean_learned.values))
    print('sigma shape: ', learned_sigma.shape)
    print('MSE: ', np.mean(residual ** 2))
    print('MSE_STD: ', np.std(residual ** 2))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    scc, pvalue = spearmanr(observed, predicted)
    print('Spearman CC: %f, p-value: %e' % (scc, pvalue))
    return observed, predicted, residual, learned_sigma, spread, quantity_name
def compute_statistics_quantile(df_data,
                                sigma_divisor=2.56,
                                col_true=4,
                                col_pred_start=6
                                ):
    """ Compute inference statistics for a quantile data frame storing
        every individual inference realization.

        Columns are interleaved in triplets with step 3:
        50th percentile at col_pred_start, the low percentile at
        col_pred_start+1 and the high percentile at col_pred_start+2.
        sigma is derived from the (high - low) range divided by
        sigma_divisor (2.56 corresponds to a 90th-10th interdecile
        range of a Gaussian).

        Parameters
        ----------
        df_data : pandas data frame
            Inference results in the current CANDLE quantile format
            (usually <model>.predicted_INFER_QTL.tsv).
        sigma_divisor : float
            Divisor converting the interdecile range to a Gaussian std
            (Default: 2.56).
        col_true : integer
            Column index of the true (observed) values (Default: 4).
        col_pred_start : integer
            Column index of the first 50th-percentile realization
            (Default: 6, step 3).

        Return
        ----------
        Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name,
        Ypred_Lp_mean, Ypred_Hp_mean — predictions are means over the
        50th-percentile realizations; the last two are the mean low and
        high percentile predictions.
    """
    observed = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', observed.shape)
    quantity_name = df_data.columns[col_true]
    # Triplets: (50th, low, high) percentile outputs per realization.
    median_pred = np.mean(df_data.iloc[:, col_pred_start::3], axis=1).values
    print('Ypred shape: ', median_pred.shape)
    low_pred = np.mean(df_data.iloc[:, col_pred_start + 1::3], axis=1).values
    high_pred = np.mean(df_data.iloc[:, col_pred_start + 2::3], axis=1).values
    # Interdecile range mapped to a Gaussian-equivalent std.
    sigma = (high_pred - low_pred) / sigma_divisor
    print('sigma shape: ', sigma.shape)
    residual = observed - median_pred
    print('yerror shape: ', residual.shape)
    spread = np.std(df_data.iloc[:, col_pred_start::3], axis=1).values
    print('Ypred_std shape: ', spread.shape)
    print('MSE: ', np.mean(residual ** 2))
    print('MSE_STD: ', np.std(residual ** 2))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    scc, pvalue = spearmanr(observed, median_pred)
    print('Spearman CC: %f, p-value: %e' % (scc, pvalue))
    return observed, median_pred, residual, sigma, spread, quantity_name, low_pred, high_pred
def split_data_for_empirical_calibration(Ytrue, Ypred, sigma, cal_split=0.8):
    """
    Extracts a portion of the arrays provided for the computation
    of the calibration and reserves the remainder portion
    for testing.

    Parameters
    ----------
    Ytrue : numpy array
        Array with true (observed) values
    Ypred : numpy array
        Array with predicted values.
    sigma : numpy array
        Array with standard deviations learned with deep learning
        model (or std value computed from prediction if homoscedastic
        inference).
    cal_split : float
        Split of data to use for estimating the calibration relationship.
        It is assumed that it will be a value in (0, 1).
        (Default: use 80% of predictions to generate empirical
        calibration).

    Return
    ----------
    index_perm_total : numpy array
        Random permutation of the array indices. The first 'num_cal'
        of the indices correspond to the samples that are used for
        calibration, while the remainder are the samples reserved
        for calibration testing.
    pSigma_cal : numpy array
        Part of the input sigma array to use for calibration.
    pSigma_test : numpy array
        Part of the input sigma array to reserve for testing.
    pPred_cal : numpy array
        Part of the input Ypred array to use for calibration.
    pPred_test : numpy array
        Part of the input Ypred array to reserve for testing.
    true_cal : numpy array
        Part of the input Ytrue array to use for calibration.
    true_test : numpy array
        Part of the input Ytrue array to reserve for testing.
    """
    # shuffle data for calibration
    num_pred_total = sigma.shape[0]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    num_cal = int(num_pred_total * cal_split)
    index_perm_total = np.random.permutation(range(num_pred_total))
    # Permute all three arrays with the SAME permutation so samples stay aligned
    pSigma_perm_all = sigma[index_perm_total]
    pPred_perm_all = Ypred[index_perm_total]
    true_perm_all = Ytrue[index_perm_total]
    # Split in calibration and testing
    pSigma_cal = pSigma_perm_all[:num_cal]
    pSigma_test = pSigma_perm_all[num_cal:]
    pPred_cal = pPred_perm_all[:num_cal]
    pPred_test = pPred_perm_all[num_cal:]
    true_cal = true_perm_all[:num_cal]
    true_test = true_perm_all[num_cal:]
    print('Size of calibration set: ', true_cal.shape)
    print('Size of test set: ', true_test.shape)
    return index_perm_total, pSigma_cal, pSigma_test, pPred_cal, pPred_test, true_cal, true_test
def compute_empirical_calibration(pSigma_cal, pPred_cal, true_cal, bins, coverage_percentile):
    """ Estimate an empirical mapping between the standard deviation
        and the absolute error observed during inference.

        Per-bin statistics are computed (bining_for_calibration),
        smoothed with a Savitzky-Golay filter, and interpolated with a
        univariate spline that becomes the calibration function. The
        valid (monotonic) sigma interval is then estimated.

        Parameters
        ----------
        pSigma_cal : numpy array
            Standard deviations reserved for calibration.
        pPred_cal : numpy array
            Predictions reserved for calibration.
        true_cal : numpy array
            True (observed) values reserved for calibration.
        bins : int
            Number of bins over the sigma range.
        coverage_percentile : float
            Percentile of the absolute error used as per-bin coverage.

        Return
        ----------
        mean_sigma, min_sigma, max_sigma, error_thresholds, err_err,
        error_thresholds_smooth, sigma_start_index, sigma_end_index,
        s_interpolate — per-bin sigma statistics, raw and smoothed
        error coverages with their error bars, the inclusive index
        range of the valid calibration interval and the interpolation
        spline mapping sigma to error.
    """
    # Order everything by ascending sigma so binning sees sorted data.
    order = np.argsort(pSigma_cal)
    sigma_sorted = pSigma_cal[order]
    abs_error_sorted = np.abs(true_cal - pPred_cal)[order]
    sigma_min = np.min(sigma_sorted)
    sigma_max = np.max(sigma_sorted)
    print('Complete Sigma range --> Min: %f, Max: %f' % (sigma_min, sigma_max))
    # Per-bin statistics for sigma and the error coverage.
    (mean_sigma, min_sigma, max_sigma,
     error_thresholds, err_err) = bining_for_calibration(sigma_sorted,
                                                         sigma_min,
                                                         sigma_max,
                                                         abs_error_sorted,
                                                         bins,
                                                         coverage_percentile)
    # Smooth the noisy per-bin coverage before interpolating.
    # scipy.signal.savgol_filter(x, window_length, polyorder,
    #                            deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)
    error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1, mode='nearest')
    # The spline is the calibration function: sigma -> expected error.
    s_interpolate = InterpolatedUnivariateSpline(mean_sigma, error_thresholds_smooth)
    # Restrict the mapping to the interval where it is (nearly) monotonic.
    sigma_start_index, sigma_end_index = computation_of_valid_calibration_interval(
        error_thresholds, error_thresholds_smooth, err_err)
    print('Range of valid sigma: %.6f --> %.6f' % (mean_sigma[sigma_start_index], mean_sigma[sigma_end_index]))
    return (mean_sigma, min_sigma, max_sigma, error_thresholds, err_err,
            error_thresholds_smooth, sigma_start_index, sigma_end_index, s_interpolate)
def bining_for_calibration(pSigma_cal_ordered_, minL_sigma,
                           maxL_sigma, Er_vect_cal_orderedSigma_,
                           bins, coverage_percentile):
    """ Bin the values of the standard deviations observed during
        inference and estimate a specified coverage percentile
        in the absolute error (observed during inference as well).
        Bins that have less than 50 samples are merged until they
        surpass this threshold.

        Parameters
        ----------
        pSigma_cal_ordered_ : numpy array
            Array of standard deviations ordered in ascending way.
        minL_sigma : float
            Minimum value of standard deviations included in
            pSigma_cal_ordered_ array.
        maxL_sigma : float
            Maximum value of standard deviations included in
            pSigma_cal_ordered_ array.
        Er_vect_cal_orderedSigma_ : numpy array
            Array of absolute value of errors corresponding with
            the array of ordered standard deviations.
        bins : int
            Number of bins to split the range of standard deviations
            included in pSigma_cal_ordered_ array.
        coverage_percentile : float
            Value to use for estimating coverage when evaluating the
            percentiles of the observed absolute value of errors.

        Return
        ----------
        mean_sigma : numpy array
            Array with the mean standard deviations computed per bin.
        min_sigma : numpy array
            Array with the minimum standard deviations computed per bin.
        max_sigma : numpy array
            Array with the maximum standard deviations computed per bin.
        error_thresholds : numpy array
            Thresholds of the errors computed to attain a certain
            error coverage per bin.
        err_err : numpy array
            Error bars in errors (one standard deviation for a binomial
            distribution estimated by bin vs. the other bins) for the
            calibration error.
    """
    # Equally spaced bin edges over the sigma range (a logspace
    # alternative was tried and is kept for reference).
    # thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)
    thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
    classes = np.digitize(pSigma_cal_ordered_, thresholds)
    # Count how many samples fall in each digitize class (bins + 1
    # classes: values below the first edge land in class 0).
    Nbin = np.zeros(bins + 1)
    for i in range(bins + 1):
        indices = (classes == i)
        Nbin[i] = indices.sum()
    # Repair bins: greedily merge consecutive classes until each merged
    # bin holds more than 50 samples (smaller bins give too-noisy
    # percentile estimates).
    new_thresholds_l = []
    new_nbins_l = []
    sumN = 0
    for i in range(Nbin.shape[0]):
        sumN += Nbin[i]
        if sumN > 50:
            if i > (thresholds.shape[0] - 1):
                # Overflow class beyond the last edge: reuse the last edge.
                new_thresholds_l.append(thresholds[-1])
            else:
                new_thresholds_l.append(thresholds[i])
            new_nbins_l.append(sumN)
            sumN = 0
    new_thresholds = np.array(new_thresholds_l)
    new_nbins = np.array(new_nbins_l)
    # Fold any trailing leftover (< 50 samples) into the last merged bin
    # and pin its edge to the end of the range.
    new_thresholds[-1] = thresholds[-1]
    new_nbins[-1] += sumN
    #
    # Re-digitize with the merged edges and compute per-bin statistics.
    classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
    # -1 sentinels make any unfilled entry visible downstream.
    error_thresholds = -1. * np.ones(new_nbins.shape[0])
    mean_sigma = -1. * np.ones(new_nbins.shape[0])
    min_sigma = -1. * np.ones(new_nbins.shape[0])
    max_sigma = -1. * np.ones(new_nbins.shape[0])
    err_err = -1. * np.ones(new_nbins.shape[0])
    Ncal = pSigma_cal_ordered_.shape[0]
    for i in range(error_thresholds.shape[0]):
        indices = (classes == i)
        n_aux = indices.sum()
        # Sanity check: re-digitizing must reproduce the merged counts.
        assert n_aux == new_nbins[i]
        print('Points in bin %d: %d' % (i, n_aux))
        mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
        min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
        max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
        error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
        # One-std binomial error bar for the bin, scaled by its threshold.
        err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]
    return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err
def computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err):
    """ Function that estimates the empirical range in which a
        monotonic relation is observed between standard deviation
        and coverage of absolute value of error. Since the
        statistics computed per bin are relatively noisy, the
        application of a greedy criterion (e.g. guarantee a
        monotonically increasing relationship) does not yield
        good results. Therefore, a softer version is constructed
        based on the satisfaction of certain criteria depending
        on: the values of the error coverage computed per bin,
        a smoothed version of them and the associated error
        estimated (based on one standard deviation for a binomial
        distribution estimated by bin vs. the other bins).
        A minimal validation requiring the end index to be
        larger than the starting index is performed before
        the function returns.

        Current criteria:
        - the smoothed errors are inside the error bars AND
          they are almost increasing (a small tolerance is
          allowed, so a small wobbliness in the smoother
          values is permitted).
        OR
        - both the raw values for the bins (with a small tolerance)
          are increasing, AND the smoothed value is greater than the
          raw value.
        OR
        - the current smoothed value is greater than the previous AND
          the smoothed values for the next bin are inside the error
          bars.

        Parameters
        ----------
        error_thresholds : numpy array
            Thresholds of the errors computed to attain a certain
            error coverage per bin.
        error_thresholds_smooth : numpy array
            Thresholds of the errors computed to attain a certain
            error coverage per bin after a smoothed operation is applied
            to the frequently noisy bin-based estimations.
        err_err : numpy array
            Error bars in errors (one standard deviation for a binomial
            distribution estimated by bin vs. the other bins) for the
            calibration error.

        Return
        ----------
        sigma_start_index : non-negative integer
            Index of the first bin of the valid empirical calibration
            interval (i.e. smallest std with a meaningful error mapping).
        sigma_end_index : non-negative integer
            Index of the last bin of the valid empirical calibration
            interval (i.e. largest std with a meaningful error mapping).
    """
    # Computation of the calibration interval: per-bin confidence band.
    limitH = error_thresholds + err_err
    limitL = error_thresholds - err_err
    # search for starting point: first bin whose smoothed coverage lies
    # inside its own error bars.
    # NOTE(review): if no bin satisfies this, sigma_start_index is never
    # bound and the code below raises NameError — confirm inputs always
    # contain at least one in-band bin.
    for i in range(err_err.shape[0]):
        if ((error_thresholds_smooth[i] >= limitL[i]) and
                (error_thresholds_smooth[i] <= limitH[i])):  # Ask if the current is in the interval
            sigma_start_index = i
            break
    sigma_end_index = sigma_start_index - 1
    restart = max(1, sigma_start_index)
    # Grow the interval bin by bin while one of the softened
    # monotonicity criteria (documented above) holds.
    for i in range(restart, err_err.shape[0] - 1):
        if (((error_thresholds_smooth[i] >= limitL[i]) and
             (error_thresholds_smooth[i] <= limitH[i]) and
             ((error_thresholds_smooth[i] * 1.005 > error_thresholds_smooth[i - 1]) or
              ((error_thresholds[i] * 1.01 > error_thresholds[i - 1]) and
               (error_thresholds_smooth[i] > error_thresholds[i]))))  # Ask if the current is in the interval with slightly increasing trend
            or  # Ask if the current is greater than the previous and the next is in the interval
            ((error_thresholds_smooth[i] > error_thresholds_smooth[i - 1]) and
             ((error_thresholds_smooth[i + 1] >= limitL[i + 1]) and
              (error_thresholds_smooth[i + 1] <= limitH[i + 1])))):
            sigma_end_index = i
        else:  # Finalize search for monotonic range
            # Accept the interval once it spans more than 4 bins;
            # otherwise restart the search after the failing bin.
            if (sigma_end_index - sigma_start_index) > 4:
                break
            else:  # Reset indices
                sigma_start_index = i + 1
                sigma_end_index = i
    print('Range of valid sigma indices (inclusive): %d --> %d' % (sigma_start_index, sigma_end_index))
    # A valid interval must contain at least two bins.
    assert (sigma_end_index > sigma_start_index)
    return sigma_start_index, sigma_end_index
def applying_calibration(pSigma_test, pPred_test, true_test, s_interpolate, minL_sigma_auto, maxL_sigma_auto):
    """ Apply the empirical calibration mapping (the spline learned
        from std to absolute error) to the samples reserved for
        testing, restricted to the valid calibration interval
        [minL_sigma_auto, maxL_sigma_auto). The resulting estimated
        errors should overestimate the observed absolute errors.

        Parameters
        ----------
        pSigma_test : numpy array
            Standard deviations reserved for calibration testing.
        pPred_test : numpy array
            Predictions reserved for calibration testing.
        true_test : numpy array
            True (observed) values reserved for calibration testing.
        s_interpolate : scipy.interpolate python object
            Univariate spline mapping standard deviation to error,
            built during empirical calibration.
        minL_sigma_auto : float
            Start of the valid calibration interval.
        maxL_sigma_auto : float
            End of the valid calibration interval.

        Return
        ----------
        index_sigma_range_test : numpy array
            Boolean mask of pSigma_test entries inside the valid interval.
        xp_test : numpy array
            Standard deviations retained by the mask.
        yp_test : numpy array
            Errors estimated by the calibration spline for xp_test.
        eabs_red : numpy array
            Observed absolute errors for the retained samples.
    """
    # Keep only samples whose std lies inside the valid interval.
    in_range = (pSigma_test >= minL_sigma_auto) & (pSigma_test < maxL_sigma_auto)
    sigma_in_range = pSigma_test[in_range]
    # Map std -> estimated error through the calibration spline.
    estimated_error = s_interpolate(sigma_in_range)
    # Observed absolute error, reduced to the same samples.
    observed_abs_error = np.abs(true_test - pPred_test)[in_range]
    return in_range, sigma_in_range, estimated_error, observed_abs_error
def overprediction_check(yp_test, eabs_red):
    """ Compute the percentage of overestimated absolute error
        predictions for the arrays reserved for calibration testing
        and whose corresponding standard deviations are included
        in the valid calibration interval.

        Parameters
        ----------
        yp_test : numpy array
            Mapping of the standard deviation to error computed
            from the interpolation spline constructed by empirical
            calibration.
        eabs_red : numpy array
            Array with the observed absolute errors in the part of the
            testing array for which the observed standard deviations are
            in the valid interval of calibration.

        Return
        ----------
        percentage_over_predicted : float
            Fraction (in [0, 1]) of samples whose calibrated error
            estimate is >= the observed absolute error.
    """
    over_pred_error_index = (yp_test >= eabs_red)
    percentage_over_predicted = (over_pred_error_index.sum() / yp_test.shape[0])
    print("percentage over predicted: ", percentage_over_predicted)
    # Return the computed value (previously only printed) so callers
    # can use it programmatically; existing callers ignoring the
    # return value are unaffected.
    return percentage_over_predicted
from __future__ import absolute_import
import numpy as np
from scipy.stats import pearsonr, spearmanr
from scipy import signal
from scipy.interpolate import InterpolatedUnivariateSpline
def generate_index_distribution(numTrain, numTest, numValidation, params):
    """Build train/validation/test index partitions for UQ cross-validation.

    Dispatches on which complete triplet of UQ keys is present in
    ``params`` (checked in this order): fractions (``*_fr``), explicit
    block lists (``*_vec``), or block counts (``*_bks``). Raises
    KeyError when no complete triplet is found.
    """
    dispatch = (
        (('uq_train_fr', 'uq_valid_fr', 'uq_test_fr'),
         "Computing UQ cross-validation - Distributing by FRACTION",
         generate_index_distribution_from_fraction),
        (('uq_train_vec', 'uq_valid_vec', 'uq_test_vec'),
         "Computing UQ cross-validation - Distributing by BLOCK LIST",
         generate_index_distribution_from_block_list),
        (('uq_train_bks', 'uq_valid_bks', 'uq_test_bks'),
         "Computing UQ cross-validation - Distributing by BLOCK NUMBER",
         generate_index_distribution_from_blocks),
    )
    # First complete triplet wins, preserving the original precedence.
    for keys, banner, builder in dispatch:
        if all(k in params for k in keys):
            print(banner)
            return builder(numTrain, numTest, numValidation, params)
    print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
    raise KeyError("No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)")
def generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params):
    """Partition shuffled indices into train/validation/test by fractions.

    Reads ``uq_train_fr``, ``uq_valid_fr`` and ``uq_test_fr`` from
    ``params``; each must lie in [0, 1] and they must add up to 1
    (within a small tolerance). Returns a tuple
    (indexTrain, indexValidation, indexTest) of index arrays; the
    validation/test entries are None when their fraction is 0.
    """
    tol = 1e-7
    fr_train = params['uq_train_fr']
    fr_valid = params['uq_valid_fr']
    fr_test = params['uq_test_fr']
    # Validate each fraction individually before checking their sum.
    if (fr_train < 0.) or (fr_train > 1.):
        raise ValueError('uq_train_fr is not in (0, 1) range. uq_train_fr: ', fr_train)
    if (fr_valid < 0.) or (fr_valid > 1.):
        raise ValueError('uq_valid_fr is not in (0, 1) range. uq_valid_fr: ', fr_valid)
    if (fr_test < 0.) or (fr_test > 1.):
        raise ValueError('uq_test_fr is not in (0, 1) range. uq_test_fr: ', fr_test)
    total_fr = fr_train + fr_valid + fr_test
    if abs(total_fr - 1.) > tol:
        raise ValueError(
            'Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:',
            total_fr)
    # Test samples only enter the pool when a test fraction is requested.
    numData = numTrain + numValidation + (numTest if fr_test > 0 else 0)
    n_train = int(np.round(numData * fr_train))
    n_valid = int(np.round(numData * fr_valid))
    shuffled = np.arange(numData)
    np.random.shuffle(shuffled)
    indexTrain = shuffled[:n_train]
    indexValidation = shuffled[n_train:n_train + n_valid] if fr_valid > 0 else None
    indexTest = shuffled[n_train + n_valid:] if fr_test > 0 else None
    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):
    """Partition shuffled indices into train/validation/test by block counts.

    Reads ``uq_train_bks``, ``uq_valid_bks`` and ``uq_test_bks`` from
    ``params``. The data size is divided into that many equally sized
    blocks (rounded division); any remainder makes the last partition
    slightly different in size, which is reported with a warning.
    Returns (indexTrain, indexValidation, indexTest); validation/test
    are None when their block count is 0.
    """
    n_bks_train = params['uq_train_bks']
    n_bks_valid = params['uq_valid_bks']
    n_bks_test = params['uq_test_bks']
    total_bks = n_bks_train + n_bks_valid + n_bks_test
    # Test samples only enter the pool when test blocks are requested.
    numData = numTrain + numValidation + (numTest if n_bks_test > 0 else 0)
    # Rounded (not truncated) integer block size.
    blockSize = (numData + total_bks // 2) // total_bks
    leftover = numData - blockSize * total_bks
    if leftover != 0:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Testing (if specified) or Validation (if specified) will use different block size.")
    n_train = n_bks_train * blockSize
    n_valid = n_bks_valid * blockSize
    shuffled = np.arange(numData)
    np.random.shuffle(shuffled)
    indexTrain = shuffled[:n_train]
    indexValidation = shuffled[n_train:n_train + n_valid] if n_bks_valid > 0 else None
    indexTest = shuffled[n_train + n_valid:] if n_bks_test > 0 else None
    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params):
    """Partition indices into train/validation/test from explicit block lists.

    Reads ``uq_train_vec``, ``uq_valid_vec`` and ``uq_test_vec`` from
    ``params``; each is a list of block numbers whose index ranges are
    concatenated (via fill_array). A non-zero remainder of the rounded
    block division is absorbed by the last block and reported with a
    warning. Returns (indexTrain, indexValidation, indexTest);
    validation/test are None when their block list is empty.
    """
    blocksTrain = params['uq_train_vec']
    blocksValidation = params['uq_valid_vec']
    blocksTest = params['uq_test_vec']
    total_bks = len(blocksTrain) + len(blocksValidation) + len(blocksTest)
    # Test samples only enter the pool when test blocks are listed.
    numData = numTrain + numValidation + (numTest if len(blocksTest) > 0 else 0)
    # Rounded (not truncated) integer block size.
    blockSize = (numData + total_bks // 2) // total_bks
    leftover = numData - blockSize * total_bks
    if leftover != 0:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Last block will have different size.")
    if leftover < 0:
        leftover = 0
    # Each partition buffer reserves room for the remainder that the
    # last block may absorb.
    indexTrain = fill_array(blocksTrain, blockSize * len(blocksTrain) + leftover,
                            numData, total_bks, blockSize)
    indexValidation = None
    if len(blocksValidation) > 0:
        indexValidation = fill_array(blocksValidation, blockSize * len(blocksValidation) + leftover,
                                     numData, total_bks, blockSize)
    indexTest = None
    if len(blocksTest) > 0:
        indexTest = fill_array(blocksTest, blockSize * len(blocksTest) + leftover,
                               numData, total_bks, blockSize)
    return indexTrain, indexValidation, indexTest
def compute_limits(numdata, numblocks, blocksize, blockn):
    """Return the [start, end) index range covered by block ``blockn``.

    The last block extends to ``numdata`` so the ranges tile the whole
    data set even when ``numdata`` is not a multiple of ``blocksize``.
    """
    lower = blocksize * blockn
    # Last block absorbs any remainder.
    upper = numdata if blockn == numblocks - 1 else lower + blocksize
    return lower, upper
def fill_array(blocklist, maxsize, numdata, numblocks, blocksize):
    """Concatenate the index ranges of the listed blocks into one array.

    Parameters
    ----------
    blocklist : iterable of int
        Block numbers whose index ranges are concatenated.
    maxsize : int
        Capacity of the buffer (upper bound on the total indices).
    numdata : int
        Total number of data samples (the last block ends here).
    numblocks : int
        Total number of blocks in the partition.
    blocksize : int
        Nominal size of each block.

    Returns
    -------
    numpy array
        The concatenated indices, trimmed to the number actually filled.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    indexArray = np.zeros(maxsize, int)
    offset = 0
    for i in blocklist:
        start, end = compute_limits(numdata, numblocks, blocksize, i)
        length = end - start
        indexArray[offset:offset + length] = np.arange(start, end)
        offset += length
    # Trim unused capacity (the last block may be shorter/longer).
    return indexArray[:offset]
def compute_statistics_homoscedastic(df_data,
                                     col_true=0,
                                     col_pred=6,
                                     col_std_pred=7,
                                     ):
    """Extract ground truth, prediction, error and prediction std from a
    homoscedastic inference data frame (one prediction column and one
    std column), printing shape/MSE/Spearman diagnostics along the way.

    Parameters
    ----------
    df_data : pandas data frame
        Inference results; column indices are hard coded to the current
        format.
    col_true : integer
        Column index of the true (observed) values (Default: 0).
    col_pred : integer
        Column index of the predicted values (Default: 6).
    col_std_pred : integer
        Column index of the std computed from inference (Default: 7).

    Return
    ----------
    Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name — for the
    homoscedastic case sigma is the same array as Ypred_std;
    pred_name is the label of the true-value column.
    """
    Ytrue = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', Ytrue.shape)
    pred_name = df_data.columns[col_true]
    Ypred = df_data.iloc[:, col_pred].values
    print('Ypred shape: ', Ypred.shape)
    Ypred_std = df_data.iloc[:, col_std_pred].values
    print('Ypred_std shape: ', Ypred_std.shape)
    yerror = Ytrue - Ypred
    print('yerror shape: ', yerror.shape)
    # Homoscedastic: the learned sigma is the inference std itself.
    sigma = Ypred_std
    MSE = np.mean((Ytrue - Ypred)**2)
    print('MSE: ', MSE)
    MSE_STD = np.std((Ytrue - Ypred)**2)
    print('MSE_STD: ', MSE_STD)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    spearman_cc, pval = spearmanr(Ytrue, Ypred)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
    return Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name
def compute_statistics_homoscedastic_all(df_data,
                                         col_true=4,
                                         col_pred_start=6
                                         ):
    """Extract ground truth, mean prediction, error and prediction std
    from a homoscedastic inference data frame that stores all the
    individual inference realizations (columns col_pred_start onward).

    Returns Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name;
    sigma and Ypred_std are both the per-sample std over realizations.
    """
    Ytrue = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', Ytrue.shape)
    pred_name = df_data.columns[col_true]
    # All trailing columns are realizations; reduce across columns.
    Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start:], axis=1)
    Ypred_mean = Ypred_mean_.values
    print('Ypred_mean shape: ', Ypred_mean.shape)
    Ypred_std_ = np.std(df_data.iloc[:, col_pred_start:], axis=1)
    Ypred_std = Ypred_std_.values
    print('Ypred_std shape: ', Ypred_std.shape)
    yerror = Ytrue - Ypred_mean
    print('yerror shape: ', yerror.shape)
    # Homoscedastic: sigma equals the realization spread.
    sigma = Ypred_std
    MSE = np.mean((Ytrue - Ypred_mean)**2)
    print('MSE: ', MSE)
    MSE_STD = np.std((Ytrue - Ypred_mean)**2)
    print('MSE_STD: ', MSE_STD)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_heteroscedastic(df_data,
                                       col_true=4,
                                       col_pred_start=6,
                                       col_std_pred_start=7,
                                       ):
    """Extract ground truth, mean prediction, error, realization std and
    the learned sigma from a heteroscedastic inference data frame where
    predictions and learned outputs are interleaved with step 2.

    Returns Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name;
    sigma is sqrt(exp(mean learned output)) per sample.
    """
    Ytrue = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', Ytrue.shape)
    pred_name = df_data.columns[col_true]
    # Prediction realizations occupy every other column from here.
    Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start::2], axis=1)
    Ypred_mean = Ypred_mean_.values
    print('Ypred shape: ', Ypred_mean.shape)
    Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::2], axis=1)
    Ypred_std = Ypred_std_.values
    print('Ypred_std shape: ', Ypred_std.shape)
    yerror = Ytrue - Ypred_mean
    print('yerror shape: ', yerror.shape)
    # Learned outputs: average, exponentiate (variance), then sqrt (std).
    s_ = df_data.iloc[:, col_std_pred_start::2]
    s_mean = np.mean(s_, axis=1)
    var = np.exp(s_mean.values)
    sigma = np.sqrt(var)
    print('sigma shape: ', sigma.shape)
    MSE = np.mean((Ytrue - Ypred_mean)**2)
    print('MSE: ', MSE)
    MSE_STD = np.std((Ytrue - Ypred_mean)**2)
    print('MSE_STD: ', MSE_STD)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_quantile(df_data, sigma_divisor=2.56, col_true=4, col_pred_start=6):
    """Summarize quantile predictions stored as (median, low, high) triples:
    50th percentile at ``col_pred_start::3``, low quantile at ``+1::3``,
    high quantile at ``+2::3``.

    :param df_data: DataFrame with ground truth and quantile predictions
    :param sigma_divisor: factor converting the interdecile range to a sigma
        (2.56 corresponds to 10th/90th percentiles of a normal distribution)
    :param col_true: column index of the ground-truth values
    :param col_pred_start: first column index of the median predictions
    :return: (Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name,
        Ypred_Lp_mean, Ypred_Hp_mean)
    """
    truth = df_data.iloc[:, col_true].values
    print('Ytrue shape: ', truth.shape)
    pred_name = df_data.columns[col_true]

    median_cols = df_data.iloc[:, col_pred_start::3]
    mean_pred = np.mean(median_cols, axis=1).values
    print('Ypred shape: ', mean_pred.shape)

    low_mean = np.mean(df_data.iloc[:, col_pred_start + 1::3], axis=1).values
    high_mean = np.mean(df_data.iloc[:, col_pred_start + 2::3], axis=1).values
    # Interdecile range scaled down to an equivalent Gaussian sigma.
    sigma = (high_mean - low_mean) / sigma_divisor
    print('sigma shape: ', sigma.shape)

    residual = truth - mean_pred
    print('yerror shape: ', residual.shape)

    std_pred = np.std(median_cols, axis=1).values
    print('Ypred_std shape: ', std_pred.shape)

    squared_err = (truth - mean_pred) ** 2
    print('MSE: ', np.mean(squared_err))
    print('MSE_STD: ', np.std(squared_err))
    spearman_cc, pval = spearmanr(truth, mean_pred)
    print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))

    return (truth, mean_pred, residual, sigma, std_pred, pred_name,
            low_mean, high_mean)
def split_data_for_empirical_calibration(Ytrue, Ypred, sigma, cal_split=0.8):
    """Randomly split samples into calibration and test subsets.

    :param Ytrue: ground-truth values, shape (N,)
    :param Ypred: predicted values, shape (N,)
    :param sigma: predicted uncertainties, shape (N,)
    :param cal_split: fraction of samples assigned to the calibration set
    :return: (index_perm_total, pSigma_cal, pSigma_test, pPred_cal,
        pPred_test, true_cal, true_test)
    """
    num_pred_total = sigma.shape[0]
    # Use the builtin int: the np.int alias was deprecated in NumPy 1.20
    # and removed in 1.24, where it raises AttributeError.
    num_cal = int(num_pred_total * cal_split)

    # One shared permutation keeps the three arrays aligned per sample.
    index_perm_total = np.random.permutation(range(num_pred_total))
    pSigma_perm_all = sigma[index_perm_total]
    pPred_perm_all = Ypred[index_perm_total]
    true_perm_all = Ytrue[index_perm_total]

    # First num_cal shuffled samples calibrate; the rest evaluate.
    pSigma_cal = pSigma_perm_all[:num_cal]
    pSigma_test = pSigma_perm_all[num_cal:]
    pPred_cal = pPred_perm_all[:num_cal]
    pPred_test = pPred_perm_all[num_cal:]
    true_cal = true_perm_all[:num_cal]
    true_test = true_perm_all[num_cal:]
    print('Size of calibration set: ', true_cal.shape)
    print('Size of test set: ', true_test.shape)
    return index_perm_total, pSigma_cal, pSigma_test, pPred_cal, pPred_test, true_cal, true_test
def compute_empirical_calibration(pSigma_cal, pPred_cal, true_cal, bins, coverage_percentile):
    """Build an empirical sigma -> error-threshold calibration curve.

    Sorts the calibration samples by predicted sigma, bins them, takes a
    per-bin error percentile, smooths the result and fits an interpolating
    spline over the interval where the smoothed curve is trustworthy.

    :param pSigma_cal: predicted uncertainties of the calibration set
    :param pPred_cal: predicted values of the calibration set
    :param true_cal: ground-truth values of the calibration set
    :param bins: number of initial (pre-merge) sigma bins
    :param coverage_percentile: error percentile taken inside each bin
    :return: (mean_sigma, min_sigma, max_sigma, error_thresholds, err_err,
        error_thresholds_smooth, sigma_start_index, sigma_end_index,
        s_interpolate)
    """
    # Order every per-sample quantity by increasing predicted sigma.
    order = np.argsort(pSigma_cal)
    sigma_sorted = pSigma_cal[order]
    abs_error_sorted = np.abs(true_cal - pPred_cal)[order]

    minL_sigma = np.min(sigma_sorted)
    maxL_sigma = np.max(sigma_sorted)
    print('Complete Sigma range --> Min: %f, Max: %f' % (minL_sigma, maxL_sigma))

    (mean_sigma, min_sigma, max_sigma,
     error_thresholds, err_err) = bining_for_calibration(sigma_sorted,
                                                         minL_sigma,
                                                         maxL_sigma,
                                                         abs_error_sorted,
                                                         bins,
                                                         coverage_percentile)

    # Smooth the raw thresholds, then interpolate so any sigma in range maps
    # to a calibrated error bound.
    error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1, mode='nearest')
    s_interpolate = InterpolatedUnivariateSpline(mean_sigma, error_thresholds_smooth)

    sigma_start_index, sigma_end_index = computation_of_valid_calibration_interval(
        error_thresholds, error_thresholds_smooth, err_err)
    print('Range of valid sigma: %.6f --> %.6f'
          % (mean_sigma[sigma_start_index], mean_sigma[sigma_end_index]))

    return (mean_sigma, min_sigma, max_sigma, error_thresholds, err_err,
            error_thresholds_smooth, sigma_start_index, sigma_end_index,
            s_interpolate)
def bining_for_calibration(pSigma_cal_ordered_, minL_sigma,
                           maxL_sigma, Er_vect_cal_orderedSigma_,
                           bins, coverage_percentile):
    """Group samples into sigma bins and take a per-bin error percentile.

    Starts from uniformly spaced bin edges, then merges consecutive bins
    until each merged bin contains more than 50 samples; leftover samples
    at the tail are folded into the last bin.

    :param pSigma_cal_ordered_: predicted sigmas sorted ascending
    :param minL_sigma: smallest sigma (lower edge of the binning range)
    :param maxL_sigma: largest sigma (upper edge of the binning range)
    :param Er_vect_cal_orderedSigma_: absolute errors, ordered like the sigmas
    :param bins: number of initial (pre-merge) bins
    :param coverage_percentile: percentile of the error taken inside each bin
    :return: (mean_sigma, min_sigma, max_sigma, error_thresholds, err_err)
        per merged bin
    """
    # Candidate edges: uniformly spaced across the observed sigma range.
    thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
    classes = np.digitize(pSigma_cal_ordered_, thresholds)
    # Count how many samples land in each of the bins+1 digitize classes.
    Nbin = np.zeros(bins + 1)
    for i in range(bins + 1):
        indices = (classes == i)
        Nbin[i] = indices.sum()
    # Merge consecutive bins forward until the accumulated count exceeds 50,
    # then close the merged bin at the current edge.
    new_thresholds_l = []
    new_nbins_l = []
    sumN = 0
    for i in range(Nbin.shape[0]):
        sumN += Nbin[i]
        if sumN > 50:
            if i > (thresholds.shape[0] - 1):
                # Class index past the last edge (values >= max edge).
                new_thresholds_l.append(thresholds[-1])
            else:
                new_thresholds_l.append(thresholds[i])
            new_nbins_l.append(sumN)
            sumN = 0
    new_thresholds = np.array(new_thresholds_l)
    new_nbins = np.array(new_nbins_l)
    # Fold any trailing remainder (<= 50 samples) into the last merged bin
    # and pin its edge to the overall maximum.
    new_thresholds[-1] = thresholds[-1]
    new_nbins[-1] += sumN
    # Re-classify against the merged edges (last edge dropped so the final
    # class absorbs everything up to and including maxL_sigma).
    classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
    # Per-bin outputs, initialized to -1 sentinels.
    error_thresholds = -1. * np.ones(new_nbins.shape[0])
    mean_sigma = -1. * np.ones(new_nbins.shape[0])
    min_sigma = -1. * np.ones(new_nbins.shape[0])
    max_sigma = -1. * np.ones(new_nbins.shape[0])
    err_err = -1. * np.ones(new_nbins.shape[0])
    Ncal = pSigma_cal_ordered_.shape[0]
    for i in range(error_thresholds.shape[0]):
        indices = (classes == i)
        n_aux = indices.sum()
        # Sanity check: re-classification must reproduce the merged counts.
        assert n_aux == new_nbins[i]
        print('Points in bin %d: %d' % (i, n_aux))
        mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
        min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
        max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
        error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
        # Binomial-style uncertainty on the per-bin threshold, scaled by the
        # threshold magnitude: sqrt(n*(N-n))/N * threshold.
        err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]
    return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err
def computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err):
    """Find the largest index interval where the smoothed thresholds are
    statistically consistent with the raw per-bin thresholds.

    :param error_thresholds: raw per-bin error percentiles
    :param error_thresholds_smooth: smoothed version of the thresholds
    :param err_err: per-bin uncertainty on the raw thresholds
    :return: (sigma_start_index, sigma_end_index), inclusive bounds with
        sigma_end_index > sigma_start_index (asserted)
    """
    # Consistency band: smoothed value must lie within +/- err_err of raw.
    limitH = error_thresholds + err_err
    limitL = error_thresholds - err_err
    # First bin where the smoothed curve falls inside the band.
    # NOTE(review): if no bin ever qualifies, sigma_start_index is never
    # bound and the next statement raises UnboundLocalError -- presumably
    # inputs always contain at least one consistent bin; confirm with callers.
    for i in range(err_err.shape[0]):
        if ((error_thresholds_smooth[i] >= limitL[i]) and
           (error_thresholds_smooth[i] <= limitH[i])):
            sigma_start_index = i
            break
    sigma_end_index = sigma_start_index - 1
    restart = max(1, sigma_start_index)
    # Extend the interval while the smoothed curve stays in-band and behaves
    # monotonically (small upward wiggles of ~0.5-1% are tolerated), or while
    # an in-band rise is confirmed by the next bin also being in-band.
    for i in range(restart, err_err.shape[0] - 1):
        if (((error_thresholds_smooth[i] >= limitL[i]) and
             (error_thresholds_smooth[i] <= limitH[i]) and
             ((error_thresholds_smooth[i] * 1.005 > error_thresholds_smooth[i - 1]) or
              ((error_thresholds[i] * 1.01 > error_thresholds[i - 1]) and
               (error_thresholds_smooth[i] > error_thresholds[i]))))
            or
            ((error_thresholds_smooth[i] > error_thresholds_smooth[i - 1]) and
             ((error_thresholds_smooth[i + 1] >= limitL[i + 1]) and
              (error_thresholds_smooth[i + 1] <= limitH[i + 1])))):
            sigma_end_index = i
        else:
            # Keep an interval longer than 4 bins; otherwise restart it here.
            if (sigma_end_index - sigma_start_index) > 4:
                break
            else:
                sigma_start_index = i + 1
                sigma_end_index = i
    print('Range of valid sigma indices (inclusive): %d --> %d' % (sigma_start_index, sigma_end_index))
    assert (sigma_end_index > sigma_start_index)
    return sigma_start_index, sigma_end_index
def applying_calibration(pSigma_test, pPred_test, true_test, s_interpolate, minL_sigma_auto, maxL_sigma_auto):
    """Evaluate the calibration spline on test samples whose predicted sigma
    falls inside the valid calibrated interval.

    :param pSigma_test: predicted uncertainties of the test set
    :param pPred_test: predicted values of the test set
    :param true_test: ground-truth values of the test set
    :param s_interpolate: callable mapping sigma -> calibrated error bound
    :param minL_sigma_auto: lower (inclusive) edge of the valid sigma range
    :param maxL_sigma_auto: upper (exclusive) edge of the valid sigma range
    :return: (in-range mask, in-range sigmas, calibrated bounds,
        in-range absolute errors)
    """
    in_range = (pSigma_test >= minL_sigma_auto) & (pSigma_test < maxL_sigma_auto)
    xp_test = pSigma_test[in_range]
    yp_test = s_interpolate(xp_test)
    abs_error = np.abs(true_test - pPred_test)
    return in_range, xp_test, yp_test, abs_error[in_range]
def overprediction_check(yp_test, eabs_red):
    """Print the fraction of samples whose calibrated bound covers the error.

    :param yp_test: calibrated error bounds
    :param eabs_red: observed absolute errors for the same samples
    """
    covered = (yp_test >= eabs_red)
    fraction = covered.sum() / yp_test.shape[0]
    print("percentage over predicted: ", fraction)
| true | true |
f732e5668b65d11b93bd117e38193c3912a37e5a | 3,589 | py | Python | pyasstosrt/pyasstosrt.py | GitBib/pyasstosrt | fb69d88b56a25e7cc00a098311a0f19aec05419f | [
"Apache-2.0"
] | 6 | 2020-04-15T01:46:00.000Z | 2021-09-29T05:16:19.000Z | pyasstosrt/pyasstosrt.py | GitBib/pyasstosrt | fb69d88b56a25e7cc00a098311a0f19aec05419f | [
"Apache-2.0"
] | 2 | 2020-11-15T01:29:58.000Z | 2022-02-08T18:47:03.000Z | pyasstosrt/pyasstosrt.py | GitBib/pyasstosrt | fb69d88b56a25e7cc00a098311a0f19aec05419f | [
"Apache-2.0"
] | 1 | 2021-10-18T07:26:22.000Z | 2021-10-18T07:26:22.000Z | import os
import re
from os.path import isfile
from pathlib import Path
from typing import AnyStr, List, Union, Optional
from .dialogue import Dialogue
class Subtitle:
    """Convert Advanced SubStation Alpha (.ass) subtitles to SubRip (.srt).

    :param filepath: path to a file containing Advanced SubStation Alpha text
    """
    # Captures (start time, end time, dialogue text) from an ASS "Dialogue:" line.
    dialog_mask = re.compile(r"Dialogue: \d+?,(\d:\d{2}:\d{2}.\d{2}),(\d:\d{2}:\d{2}.\d{2}),.*?,\d+,\d+,\d+,.*?,(.*)")

    def __init__(self, filepath: Union[str, os.PathLike]):
        if not isfile(filepath):
            raise FileNotFoundError('"{}" does not exist'.format(filepath))
        if isinstance(filepath, os.PathLike):
            self.filepath: AnyStr = str(filepath)
            self.file: AnyStr = filepath.stem
        elif isinstance(filepath, str):
            self.filepath: AnyStr = filepath
            self.file: AnyStr = Path(filepath).stem
        else:
            raise TypeError('"{}" is not of type str'.format(filepath))
        self.raw_text: AnyStr = self.get_text()
        self.dialogues: List = []

    def get_text(self) -> AnyStr:
        """Read the source file and return its complete contents (UTF-8).

        :return: file contents
        """
        return Path(self.filepath).read_text(encoding="utf8")

    def convert(self):
        """Parse the ASS text and populate ``self.dialogues``.

        :return: None
        """
        # Strip inline override blocks such as {\pos(...)} before matching.
        cleaning_old_format = re.compile(r"{.*?}")
        dialogs = re.findall(self.dialog_mask, re.sub(cleaning_old_format, "", self.raw_text))
        # Drop entries with empty text; sorting orders them by start time.
        dialogs = sorted(list(filter(lambda x: x[2], dialogs)))
        self.subtitle_formatting(dialogs)

    @staticmethod
    def text_clearing(raw_text: str) -> str:
        """Clean dialogue text of ASS escapes: expand ``\\h`` hard spaces and
        turn ``\\N`` markers into real line breaks.

        :param raw_text: dialogue text possibly containing whitespace escapes
        :return: stripped text with proper newlines
        """
        text = raw_text.replace(r'\h', '\xa0').strip()
        line_text = text.split(r'\N')
        return '\n'.join(item.strip() for item in line_text).strip()

    def subtitle_formatting(self, dialogues: List):
        """Wrap each parsed (start, end, text) tuple in a numbered Dialogue.

        :param dialogues: prepared dialogue tuples in display order
        :return: None; results accumulate in ``self.dialogues``
        """
        for index, values in enumerate(dialogues, start=1):
            start, end, text = values
            text = self.text_clearing(text.strip())
            dialogue = Dialogue(index, start, end, text)
            self.dialogues.append(dialogue)

    def export(
            self,
            output_dir: AnyStr = None,
            encoding: AnyStr = "utf8",
            output_dialogues: bool = False
    ) -> Optional[List]:
        """Convert, then either write a SubRip file or return the dialogues.

        :param output_dir: directory for the SubRip file (defaults to the
            source file's own directory)
        :param encoding: encoding used when writing the file
        :param output_dialogues: if True, skip writing and return the list
            of dialogues instead
        :return: the list of dialogues when ``output_dialogues`` is True,
            otherwise None
        """
        self.convert()
        if output_dialogues:
            return self.dialogues
        path = Path(self.filepath)
        file = self.file + ".srt"
        if output_dir:
            Path(output_dir).mkdir(parents=True, exist_ok=True)
            out_path = os.path.join(output_dir, file)
        else:
            out_path = os.path.join(path.parent, file)
        with open(out_path, encoding=encoding, mode="w") as writer:
            for dialogue in self.dialogues:
                writer.write(str(dialogue))
| 33.542056 | 118 | 0.600167 | import os
import re
from os.path import isfile
from pathlib import Path
from typing import AnyStr, List, Union, Optional
from .dialogue import Dialogue
class Subtitle:
dialog_mask = re.compile(r"Dialogue: \d+?,(\d:\d{2}:\d{2}.\d{2}),(\d:\d{2}:\d{2}.\d{2}),.*?,\d+,\d+,\d+,.*?,(.*)")
def __init__(self, filepath: Union[str, os.PathLike]):
if not isfile(filepath):
raise FileNotFoundError('"{}" does not exist'.format(filepath))
if isinstance(filepath, os.PathLike):
self.filepath: AnyStr = str(filepath)
self.file: AnyStr = filepath.stem
elif isinstance(filepath, str):
self.filepath: AnyStr = filepath
self.file: AnyStr = Path(filepath).stem
else:
raise TypeError('"{}" is not of type str'.format(filepath))
self.raw_text: AnyStr = self.get_text()
self.dialogues: List = []
def get_text(self) -> AnyStr:
return Path(self.filepath).read_text(encoding="utf8")
def convert(self):
cleaning_old_format = re.compile(r"{.*?}")
dialogs = re.findall(self.dialog_mask, re.sub(cleaning_old_format, "", self.raw_text))
dialogs = sorted(list(filter(lambda x: x[2], dialogs)))
self.subtitle_formatting(dialogs)
@staticmethod
def text_clearing(raw_text: str) -> str:
text = raw_text.replace(r'\h', '\xa0').strip()
line_text = text.split(r'\N')
return '\n'.join(item.strip() for item in line_text).strip()
def subtitle_formatting(self, dialogues: List):
for index, values in enumerate(dialogues, start=1):
start, end, text = values
text = self.text_clearing(text.strip())
dialogue = Dialogue(index, start, end, text)
self.dialogues.append(dialogue)
def export(
self,
output_dir: AnyStr = None,
encoding: AnyStr = "utf8",
output_dialogues: bool = False
) -> Optional[List]:
self.convert()
if output_dialogues:
return self.dialogues
path = Path(self.filepath)
file = self.file + ".srt"
if output_dir:
Path(output_dir).mkdir(parents=True, exist_ok=True)
out_path = os.path.join(output_dir, file)
else:
out_path = os.path.join(path.parent, file)
with open(out_path, encoding=encoding, mode="w") as writer:
for dialogue in self.dialogues:
writer.write(str(dialogue))
| true | true |
f732e5b07ff3a53f8f9e92219e9ef7d5237d6a1e | 2,449 | py | Python | neutron_lib/api/definitions/__init__.py | starlingx-staging/stx-neutron-lib | a0e07ae87ad0cfb5df8b72aed63bc8cb9a9d92b9 | [
"Apache-2.0"
] | null | null | null | neutron_lib/api/definitions/__init__.py | starlingx-staging/stx-neutron-lib | a0e07ae87ad0cfb5df8b72aed63bc8cb9a9d92b9 | [
"Apache-2.0"
] | null | null | null | neutron_lib/api/definitions/__init__.py | starlingx-staging/stx-neutron-lib | a0e07ae87ad0cfb5df8b72aed63bc8cb9a9d92b9 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import address_scope
from neutron_lib.api.definitions import agent
from neutron_lib.api.definitions import auto_allocated_topology
from neutron_lib.api.definitions import bgpvpn
from neutron_lib.api.definitions import bgpvpn_routes_control
from neutron_lib.api.definitions import data_plane_status
from neutron_lib.api.definitions import dns
from neutron_lib.api.definitions import dns_domain_ports
from neutron_lib.api.definitions import extra_dhcp_opt
from neutron_lib.api.definitions import fip64
from neutron_lib.api.definitions import firewall
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api.definitions import firewallrouterinsertion
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import logging
from neutron_lib.api.definitions import logging_resource
from neutron_lib.api.definitions import network
from neutron_lib.api.definitions import network_mtu
from neutron_lib.api.definitions import port
from neutron_lib.api.definitions import port_security
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import router_interface_fip
from neutron_lib.api.definitions import subnet
from neutron_lib.api.definitions import subnetpool
from neutron_lib.api.definitions import trunk
from neutron_lib.api.definitions import trunk_details
# Registry of every API-definition module shipped by this package; consumers
# iterate this set to reach each definition (e.g. for validation/registration).
_ALL_API_DEFINITIONS = {
    address_scope,
    agent,
    auto_allocated_topology,
    bgpvpn,
    bgpvpn_routes_control,
    data_plane_status,
    dns,
    dns_domain_ports,
    extra_dhcp_opt,
    fip64,
    firewall,
    firewall_v2,
    firewallrouterinsertion,
    l3,
    logging,
    logging_resource,
    network,
    network_mtu,
    port,
    port_security,
    portbindings,
    provider_net,
    router_interface_fip,
    subnet,
    subnetpool,
    trunk,
    trunk_details
}
| 34.492958 | 75 | 0.811352 |
from neutron_lib.api.definitions import address_scope
from neutron_lib.api.definitions import agent
from neutron_lib.api.definitions import auto_allocated_topology
from neutron_lib.api.definitions import bgpvpn
from neutron_lib.api.definitions import bgpvpn_routes_control
from neutron_lib.api.definitions import data_plane_status
from neutron_lib.api.definitions import dns
from neutron_lib.api.definitions import dns_domain_ports
from neutron_lib.api.definitions import extra_dhcp_opt
from neutron_lib.api.definitions import fip64
from neutron_lib.api.definitions import firewall
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api.definitions import firewallrouterinsertion
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import logging
from neutron_lib.api.definitions import logging_resource
from neutron_lib.api.definitions import network
from neutron_lib.api.definitions import network_mtu
from neutron_lib.api.definitions import port
from neutron_lib.api.definitions import port_security
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import router_interface_fip
from neutron_lib.api.definitions import subnet
from neutron_lib.api.definitions import subnetpool
from neutron_lib.api.definitions import trunk
from neutron_lib.api.definitions import trunk_details
_ALL_API_DEFINITIONS = {
address_scope,
agent,
auto_allocated_topology,
bgpvpn,
bgpvpn_routes_control,
data_plane_status,
dns,
dns_domain_ports,
extra_dhcp_opt,
fip64,
firewall,
firewall_v2,
firewallrouterinsertion,
l3,
logging,
logging_resource,
network,
network_mtu,
port,
port_security,
portbindings,
provider_net,
router_interface_fip,
subnet,
subnetpool,
trunk,
trunk_details
}
| true | true |
f732e6cdeddc94f79176604dc26000aa1254bbb5 | 7,038 | py | Python | data_collection/script.py | sukumargv/hurricane | 98e792f94f6924e0b3fcfb8aef32a6638a551cd2 | [
"MIT"
] | null | null | null | data_collection/script.py | sukumargv/hurricane | 98e792f94f6924e0b3fcfb8aef32a6638a551cd2 | [
"MIT"
] | 2 | 2016-09-26T16:45:53.000Z | 2016-09-26T17:11:35.000Z | data_collection/script.py | sukumargv/hurricane | 98e792f94f6924e0b3fcfb8aef32a6638a551cd2 | [
"MIT"
] | null | null | null | import threading
from bs4 import BeautifulSoup
import re
import math
import json
import urllib2
from datetime import datetime
import pytz
start_time = datetime.utcnow()
# convert pixels to coordinates
'''
def pixels_to_coordinates(route_no, center_x, center_y):
ds = gdal.Open(route_no)
# unravel GDAL affine transform parameters
c, a, b, f, d, e = ds.GetGeoTransform()
def pixel2coord(col, row):
"""Returns global coordinates to pixel center using base-0 raster index"""
xp = a * col + b * row + a * 0.5 + b * 0.5 + c
yp = d * col + e * row + d * 0.5 + e * 0.5 + f
return xp, yp
cart_cord = pixel2coord(center_x, center_y)
# Converting coordinates from EPSG 3857 to 4326
inProj = pyproj.Proj(init='epsg:3857')
outProj = pyproj.Proj(init='epsg:4326')
coordinates = pyproj.transform(inProj, outProj, cart_cord[0], cart_cord[1])
local_dict = {'lat': coordinates[1], 'lon': coordinates[0]}
return local_dict
'''
def html_json(url, fname, rid):
    """Scrape one BC Ferries route page and dump its vessel data to a
    timestamped JSON file.

    :param url: route-map page to fetch
    :param fname: output path prefix ('-<timestamp>.json' is appended)
    :param rid: route identifier recorded in each vessel record
    """
    soup = BeautifulSoup(urllib2.urlopen(url).read(),
                         'html.parser')

    # Timestamp in the ferry system's local timezone.
    local_format = "%Y-%m-%d %H:%M:%S"
    utc_moment = datetime.utcnow().replace(tzinfo=pytz.utc)
    local_datetime = utc_moment.astimezone(pytz.timezone('Canada/Pacific'))
    timestamp = local_datetime.strftime(local_format)

    # grab the content in the script section
    script_content = soup.find('script')
    # Fix: compare with ==, not `is` -- identity comparison with an int
    # literal relies on CPython small-int interning and is not guaranteed.
    if len(re.findall('<td>.*', script_content.string, re.MULTILINE)) == 0:
        # No vessels currently on the route: emit an empty JSON document.
        page_data = {}
        with open(fname + '-' + timestamp + '.json', 'w') as fp:
            json.dump(page_data, fp, indent=0, sort_keys=True, separators=(',', ':'))
        return

    # --- pixel-boundary fragments parsed from the inline JavaScript ------
    final_boundaries_fragment = []
    pixel_boundary_string = re.findall(r'if\s\(x.*', script_content.string, re.MULTILINE)
    for x in pixel_boundary_string:
        dict_local = {}
        split_text = x.split()
        dict_local['x1'] = int(split_text[3])
        dict_local['y1'] = int(split_text[7])
        dict_local['x2'] = int(split_text[11])
        dict_local['y2'] = int(re.sub(r'\)', '', split_text[15]))
        bottom_edge_width = dict_local['x2'] - dict_local['x1']
        left_edge_width = dict_local['y2'] - dict_local['y1']
        # Center pixel of the bounding box.
        dict_local['cx'] = int(dict_local['x1'] + math.floor(bottom_edge_width / 2))
        dict_local['cy'] = int(dict_local['y1'] + math.floor(left_edge_width) / 2)
        final_boundaries_fragment.append(dict_local)

    # --- remaining vessel data from the script portion --------------------
    js_data = []
    grabbed_content = re.findall('<td>.*', script_content.string, re.MULTILINE)
    for x in grabbed_content:
        dict_local = {}
        x = re.sub('<td>', ' ', x)
        x = re.sub('<b>', ' ', x)
        x = x.strip()[:-11]
        if ':' in x:
            # NOTE(review): only the text up to the second ':' is kept, so
            # values that themselves contain ':' are truncated -- confirm
            # against real page data before changing.
            parts = x.split(':')
            dict_local[str(parts[0])] = str(parts[1].strip())
        else:
            # A vessel name line carries no "key:" prefix.
            dict_local['Vessel'] = str(x)
        js_data.append(dict_local)

    # Records have 3 rows per vessel when there is no Destination field,
    # otherwise 4.
    group = 3 if js_data[1].get("Destination") is None else 4
    final_js_fragment = [js_data[x:x + group] for x in range(0, len(js_data), group)]

    # --- HTML table data ---------------------------------------------------
    page_data = []
    list_of_tds = [str(el.get_text()) for el in soup.find_all('td')[:-2]]
    grouped_tds = [list_of_tds[x:x + 4] for x in range(0, len(list_of_tds), 4)]
    keys_html = grouped_tds[0]   # header row -> dictionary keys
    data_html = grouped_tds[1:]  # remaining rows -> values
    for row in data_html:
        dict_local = {}
        # Fix: pair each cell with its header positionally. The previous
        # keys_html[row.index(cell)] lookup mapped duplicated cell values to
        # the wrong column.
        for key, cell in zip(keys_html, row):
            dict_local[key] = cell
        try:
            dict_local['Boundaries'] = final_boundaries_fragment[len(page_data)]
        except IndexError:
            continue
        try:
            dict_local['Heading'] = final_js_fragment[len(page_data)][2]['Heading']
        except KeyError:
            dict_local['Heading'] = final_js_fragment[len(page_data)][1]['Heading']
        try:
            dict_local['Speed'] = final_js_fragment[len(page_data)][3]['Speed']
        except IndexError:
            dict_local['Speed'] = final_js_fragment[len(page_data)][2]['Speed']
        dict_local['Time'] = timestamp
        dict_local['Timezone'] = "Canada/Pacific"
        dict_local['Route'] = rid
        page_data.append(dict_local)

    with open(fname + '-' + timestamp + '.json', 'w') as fp:
        json.dump(page_data, fp, indent=0, sort_keys=True, separators=(',', ':'))
def get_all():
    """Launch one scraper thread per BC Ferries route."""
    # edit this base path to set the directory
    BASEPATH = '/Volumes/Ego/bc_ferries/'
    base_url = "http://bcferries.applocation.net/routemaps/route{}.html"
    routes = [0, 1, 2, 3, 4, 5, 6, 7, 13, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 29]
    for route in routes:
        url = base_url.format(route)
        rid = "r{:02d}".format(route)
        fname = BASEPATH + "r{:02d}".format(route)
        # Fix: pass the callable and its arguments separately. The previous
        # code called html_json(url, fname, rid) inline, so the scrape ran
        # synchronously in this thread and Thread received its None return
        # value as the target -- no work ever ran concurrently.
        thread = threading.Thread(target=html_json, args=(url, fname, rid))
        thread.start()
if __name__ == '__main__':
    get_all()
# Module-level marker assigned right after get_all() returns (its threads may
# still be running), and also on plain import -- it does not measure scrape
# duration.
end_time = datetime.utcnow()
from bs4 import BeautifulSoup
import re
import math
import json
import urllib2
from datetime import datetime
import pytz
start_time = datetime.utcnow()
def html_json(url, fname, rid):
soup = BeautifulSoup(urllib2.urlopen(url).read(),
'html.parser')
local_format = "%Y-%m-%d %H:%M:%S"
utc_moment_unaware = datetime.utcnow()
utc_moment = utc_moment_unaware.replace(tzinfo=pytz.utc)
local_datetime = utc_moment.astimezone(pytz.timezone('Canada/Pacific'))
timestamp = local_datetime.strftime(local_format)
script_content = soup.find('script')
if len(re.findall('<td>.*', script_content.string, re.MULTILINE)) is 0:
page_data={}
with open(fname + '-' +timestamp+ '.json', 'w') as fp:
json.dump(page_data, fp, indent=0, sort_keys=True, separators=(',', ':'))
else:
final_boundaries_fragment = []
pixel_boundary_string = re.findall('if\s\(x.*', script_content.string, re.MULTILINE)
for x in pixel_boundary_string:
local = []
dict_local = {}
split_text = x.split()
dict_local['x1'] = int(split_text[3])
dict_local['y1'] = int(split_text[7])
dict_local['x2'] = int(split_text[11])
dict_local['y2'] = int(re.sub('\)', '', split_text[15]))
bottom_edge_width = dict_local['x2'] - dict_local['x1']
left_edge_width = dict_local['y2'] - dict_local['y1']
dict_local['cx'] = int(dict_local['x1'] + math.floor(bottom_edge_width / 2))
dict_local['cy'] = int(dict_local['y1'] + math.floor(left_edge_width) / 2)
final_boundaries_fragment.append(dict_local)
js_data = []
grabbed_content = re.findall('<td>.*', script_content.string, re.MULTILINE)
for x in grabbed_content:
local = []
dict_local = {}
replace_td = re.compile('<td>')
x = replace_td.sub(' ', x)
replace_b = re.compile('<b>')
x = replace_b.sub(' ', x)
x = x.strip()[:-11]
if ':' in x:
x = x.split(':')
dict_local[str(x[0])] = str((x[1]).strip())
else:
dict_local['Vessel'] = str(x)
js_data.append(dict_local)
if js_data[1].get("Destination") is None:
js_grouped_by_vessel = [js_data[x:x + 3] for x in range(0, len(js_data), 3)]
else:
js_grouped_by_vessel = [js_data[x:x + 4] for x in range(0, len(js_data), 4)]
final_js_fragment = []
for i in js_grouped_by_vessel:
final_js_fragment.append(i)
page_data = []
rows = soup.find_all('td')[:-2]
list_of_tds = []
for el in soup.find_all('td')[:-2]:
x = el.get_text()
list_of_tds.append(str(el.get_text()))
grouped_tds = [list_of_tds[x:x + 4] for x in range(0, len(list_of_tds), 4)]
keys_html = grouped_tds[0]
data_html = grouped_tds[1:]
for i in data_html:
dict_local = {}
for j in i:
dict_local[keys_html[i.index(j)]] = j
try:
dict_local['Boundaries'] = final_boundaries_fragment[len(page_data)]
except IndexError:
continue
try:
dict_local['Heading'] = final_js_fragment[len(page_data)][2]['Heading']
except KeyError:
dict_local['Heading'] = final_js_fragment[len(page_data)][1]['Heading']
try:
dict_local['Speed'] = final_js_fragment[len(page_data)][3]['Speed']
except IndexError:
dict_local['Speed'] = final_js_fragment[len(page_data)][2]['Speed']
dict_local['Time'] = timestamp
dict_local['Timezone'] = "Canada/Pacific"
dict_local['Route'] = rid
page_data.append(dict_local)
with open(fname + '-' + timestamp + '.json', 'w') as fp:
json.dump(page_data, fp, indent=0, sort_keys=True, separators=(',', ':'))
def get_all():
BASEPATH = '/Volumes/Ego/bc_ferries/'
base_url = "http://bcferries.applocation.net/routemaps/route{}.html"
routes = [0, 1, 2, 3, 4, 5, 6, 7, 13, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 29]
for route in routes:
url = base_url.format(route)
rid = "r{:02d}".format(route)
fname = BASEPATH + "r{:02d}".format(route)
thread = threading.Thread(target=html_json(url, fname, rid))
thread.start()
if __name__ == '__main__':
get_all()
end_time = datetime.utcnow() | true | true |
f732e6f588f5855f7f3ca7d4824dee056ed66fae | 813 | py | Python | configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py | SeanZhang777/mmdetection | 14d250f9575198a8d0f2001903e4eed908e4461f | [
"Apache-2.0"
] | 4 | 2021-06-01T08:33:56.000Z | 2022-02-12T14:54:09.000Z | configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py | SeanZhang777/mmdetection | 14d250f9575198a8d0f2001903e4eed908e4461f | [
"Apache-2.0"
] | null | null | null | configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py | SeanZhang777/mmdetection | 14d250f9575198a8d0f2001903e4eed908e4461f | [
"Apache-2.0"
] | 1 | 2021-06-01T08:34:00.000Z | 2021-06-01T08:34:00.000Z | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
pretrained=None,
backbone=dict(
frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# optimizer
optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0))
optimizer_config = dict(_delete_=True, grad_clip=None)
# learning policy
lr_config = dict(warmup_ratio=0.1, step=[65, 71])
runner = dict(max_epochs=73)
| 33.875 | 72 | 0.692497 | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
pretrained=None,
backbone=dict(
frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0))
optimizer_config = dict(_delete_=True, grad_clip=None)
lr_config = dict(warmup_ratio=0.1, step=[65, 71])
runner = dict(max_epochs=73)
| true | true |
f732e886bb6651ccc0f8d71e8ddd18c7c6dbccb9 | 266 | py | Python | Workshop/Part3/part3_sol.py | ibenemerito88/openBF_workshop | a63a6fbd1ef8528890fb1072730124e054875008 | [
"Zlib",
"Apache-2.0"
] | null | null | null | Workshop/Part3/part3_sol.py | ibenemerito88/openBF_workshop | a63a6fbd1ef8528890fb1072730124e054875008 | [
"Zlib",
"Apache-2.0"
] | null | null | null | Workshop/Part3/part3_sol.py | ibenemerito88/openBF_workshop | a63a6fbd1ef8528890fb1072730124e054875008 | [
"Zlib",
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import reslast
plt.close("all")
# Symmetric network
q,a,p,u,c,n,s = reslast.resu("network")
# Non-symmetric network
qn,an,pn,un,cn,nn,sn = reslast.resu("networknonsym")
plt.show() | 14.777778 | 52 | 0.733083 | import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import reslast
plt.close("all")
q,a,p,u,c,n,s = reslast.resu("network")
qn,an,pn,un,cn,nn,sn = reslast.resu("networknonsym")
plt.show() | true | true |
f732e8b734c2a496b973cb2688cadd10beb54d63 | 1,044 | py | Python | users/users.py | TRUFA-rnaseq/trufa-users-sqlite | 7213481786037414952deb419014627c42cc79ac | [
"BSD-3-Clause"
] | null | null | null | users/users.py | TRUFA-rnaseq/trufa-users-sqlite | 7213481786037414952deb419014627c42cc79ac | [
"BSD-3-Clause"
] | null | null | null | users/users.py | TRUFA-rnaseq/trufa-users-sqlite | 7213481786037414952deb419014627c42cc79ac | [
"BSD-3-Clause"
] | null | null | null | # ------------------------------------------------------------------------------
from . import database as db
# ------------------------------------------------------------------------------
def checkUser(username, passwd):
    """Delegate credential verification for *username*/*passwd* to the database layer."""
    return db.checkUser(username, passwd)
# ------------------------------------------------------------------------------
def checkIfUserAvailable(username):
    """Delegate the username-availability check to the database layer."""
    return db.checkIfUserAvailable(username)
# ------------------------------------------------------------------------------
def getUserEmail(username):
    """Look up the stored e-mail address for *username* via the database layer."""
    return db.getUserEmail(username)
# ------------------------------------------------------------------------------
def allowPasswordChange(username):
    """Whether *username* may change their password; currently always True."""
    return True
# ------------------------------------------------------------------------------
def changeUserPassword(username, oldpass, newpass):
    """Set a new password after verifying the old one.

    Returns the database layer's result on success, or False when the old
    credentials do not check out.
    """
    if not db.checkUser(username, oldpass):
        return False
    return db.changeUserPassword(username, newpass)
# ------------------------------------------------------------------------------
| 32.625 | 80 | 0.344828 |
from . import database as db
def checkUser(username, passwd):
return db.checkUser(username, passwd)
def checkIfUserAvailable(username):
return db.checkIfUserAvailable(username)
def getUserEmail(username):
return db.getUserEmail(username)
def allowPasswordChange(username):
return True
def changeUserPassword(username, oldpass, newpass):
if db.checkUser(username, oldpass):
return db.changeUserPassword(username, newpass)
return False
| true | true |
f732e8e89e99d78486d786aaa8642acc5b624676 | 184 | py | Python | spotify_dl/constants.py | opiumozor/spotify-dl | 2e5b42727f6d05123e8f05c0b8da547585b6a9d2 | [
"MIT"
] | 1 | 2020-12-22T23:06:27.000Z | 2020-12-22T23:06:27.000Z | spotify_dl/constants.py | opiumozor/spotify-dl | 2e5b42727f6d05123e8f05c0b8da547585b6a9d2 | [
"MIT"
] | null | null | null | spotify_dl/constants.py | opiumozor/spotify-dl | 2e5b42727f6d05123e8f05c0b8da547585b6a9d2 | [
"MIT"
] | null | null | null | __all__ = ['VERSION']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
VIDEO = 'youtube#video'
YOUTUBE_VIDEO_URL = 'https://www.youtube.com/watch?v='
VERSION = '4.0.0'
| 23 | 54 | 0.728261 | __all__ = ['VERSION']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
VIDEO = 'youtube#video'
YOUTUBE_VIDEO_URL = 'https://www.youtube.com/watch?v='
VERSION = '4.0.0'
| true | true |
f732e9b2a6ebaaa5570c84740a20877a5638855d | 162 | py | Python | tests/test_models/test_backbones/__init__.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_backbones/__init__.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_backbones/__init__.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm
__all__ = ['is_block', 'is_norm', 'check_norm_state']
| 32.4 | 55 | 0.740741 |
from .utils import check_norm_state, is_block, is_norm
__all__ = ['is_block', 'is_norm', 'check_norm_state']
| true | true |
f732eaff6151f4d4b22cd35da4acef79c66dca69 | 2,212 | py | Python | tests/integration/exception_test.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | tests/integration/exception_test.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | tests/integration/exception_test.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | import pytest
import stweet as st
from stweet.auth import TwitterAuthTokenProvider, SimpleAuthTokenProvider
from stweet.exceptions import RefreshTokenException, ScrapBatchBadResponse
from tests.integration.mock_web_client import MockWebClient
def test_get_auth_token_with_incorrect_response_1():
    """Token refresh must fail when the web client returns no status and no body."""
    with pytest.raises(RefreshTokenException):
        TwitterAuthTokenProvider(MockWebClient(None, None)).get_new_token()
def test_get_simple_auth_token_with_incorrect_response_1():
    """The simple provider must also fail on an empty (None, None) response."""
    with pytest.raises(RefreshTokenException):
        SimpleAuthTokenProvider(MockWebClient(None, None)).get_new_token()
def test_get_auth_token_with_incorrect_response_2():
    """An HTTP 400 reply must surface as RefreshTokenException."""
    with pytest.raises(RefreshTokenException):
        provider = TwitterAuthTokenProvider(MockWebClient(400, 'None'))
        provider.get_new_token()
def test_get_auth_token_with_incorrect_response_3():
    """A 200 reply whose body carries no token must surface as RefreshTokenException."""
    with pytest.raises(RefreshTokenException):
        provider = TwitterAuthTokenProvider(MockWebClient(200, 'None'))
        provider.get_new_token()
def test_runner_exceptions():
    """The runner must raise ScrapBatchBadResponse when the search endpoint
    keeps failing even after a token refresh (429 first, then 400)."""
    class TokenExpiryExceptionWebClient(st.WebClient):
        # Per-URL call counter. This is a shared class-level dict; fine here
        # because the client class is used for a single test run only.
        count_dict = dict({
            'https://twitter.com': 0,
            'https://api.twitter.com/2/search/adaptive.json': 0
        })
        def run_request(self, params: st.http_request.RequestDetails) -> st.http_request.RequestResponse:
            self.count_dict[params.url] = self.count_dict[params.url] + 1
            if params.url == 'https://api.twitter.com/2/search/adaptive.json':
                if self.count_dict[params.url] == 1:
                    # First search call: simulate a 429 so the runner refreshes its token.
                    return st.http_request.RequestResponse(429, None)
                else:
                    # Second search call (after refresh): unrecoverable bad response.
                    return st.http_request.RequestResponse(400, '')
            else:
                # Token-refresh endpoint: return a page fragment containing a guest token ("gt=...").
                return st.http_request.RequestResponse(200, 'decodeURIComponent("gt=1330640566170869763; Max=10800;')
    with pytest.raises(ScrapBatchBadResponse):
        search_tweets_task = st.SearchTweetsTask(
            all_words='#koronawirus'
        )
        st.TweetSearchRunner(
            search_tweets_task=search_tweets_task,
            tweet_outputs=[],
            web_client=TokenExpiryExceptionWebClient(),
            auth_token_provider_factory=st.auth.TwitterAuthTokenProviderFactory()
        ).run()
| 38.807018 | 117 | 0.711121 | import pytest
import stweet as st
from stweet.auth import TwitterAuthTokenProvider, SimpleAuthTokenProvider
from stweet.exceptions import RefreshTokenException, ScrapBatchBadResponse
from tests.integration.mock_web_client import MockWebClient
def test_get_auth_token_with_incorrect_response_1():
with pytest.raises(RefreshTokenException):
TwitterAuthTokenProvider(MockWebClient(None, None)).get_new_token()
def test_get_simple_auth_token_with_incorrect_response_1():
with pytest.raises(RefreshTokenException):
SimpleAuthTokenProvider(MockWebClient(None, None)).get_new_token()
def test_get_auth_token_with_incorrect_response_2():
with pytest.raises(RefreshTokenException):
TwitterAuthTokenProvider(MockWebClient(400, 'None')).get_new_token()
def test_get_auth_token_with_incorrect_response_3():
with pytest.raises(RefreshTokenException):
TwitterAuthTokenProvider(MockWebClient(200, 'None')).get_new_token()
def test_runner_exceptions():
class TokenExpiryExceptionWebClient(st.WebClient):
count_dict = dict({
'https://twitter.com': 0,
'https://api.twitter.com/2/search/adaptive.json': 0
})
def run_request(self, params: st.http_request.RequestDetails) -> st.http_request.RequestResponse:
self.count_dict[params.url] = self.count_dict[params.url] + 1
if params.url == 'https://api.twitter.com/2/search/adaptive.json':
if self.count_dict[params.url] == 1:
return st.http_request.RequestResponse(429, None)
else:
return st.http_request.RequestResponse(400, '')
else:
return st.http_request.RequestResponse(200, 'decodeURIComponent("gt=1330640566170869763; Max=10800;')
with pytest.raises(ScrapBatchBadResponse):
search_tweets_task = st.SearchTweetsTask(
all_words='#koronawirus'
)
st.TweetSearchRunner(
search_tweets_task=search_tweets_task,
tweet_outputs=[],
web_client=TokenExpiryExceptionWebClient(),
auth_token_provider_factory=st.auth.TwitterAuthTokenProviderFactory()
).run()
| true | true |
f732ed8370a93b287fd9dff34ab1c8565a35baae | 2,805 | py | Python | user/models/verifycode.py | Hrsn2861/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | null | null | null | user/models/verifycode.py | Hrsn2861/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | 7 | 2020-06-06T01:55:39.000Z | 2022-02-10T11:46:31.000Z | user/models/verifycode.py | Hrsnnnn/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | null | null | null | """Models about verify
"""
from django.db import models
from utils import getdate_now, randkey
from utils.checker import UserInfoChecker
class VerifyCode(models.Model):
    """A one-time verification code sent to a phone for a given session.

    A row whose ``code`` is the empty string has been consumed or
    invalidated; rows are blanked rather than deleted.
    """
    session_id = models.IntegerField()  # id of the requesting session (plain int, not a FK)
    phone = models.CharField(max_length=11)  # recipient phone number
    code = models.CharField(max_length=8)  # the code itself; '' once consumed/invalidated
    send_time = models.DateTimeField()  # when the code was issued
    class Meta:
        verbose_name = 'verifycode'
        verbose_name_plural = 'verifycodes'
        get_latest_by = 'id'
class VerifyHelper:
    """Helper for issuing, invalidating and checking verification codes.

    All methods are static; state lives in the ``VerifyCode`` table. A row
    whose ``code`` field is the empty string counts as consumed.
    """
    @staticmethod
    def get_latest_code(session_id, phone):
        """Return the newest unconsumed code for (session_id, phone).

        Returns a dict with ``code`` and ``time`` (send time) keys, or
        ``None`` when the arguments are invalid or no live code exists.
        """
        # Validate inputs before touching the database.
        if not isinstance(session_id, int):
            return None
        if UserInfoChecker.check_phone(phone) is not True:
            return None
        # Exclude consumed rows (code == '').
        logs = VerifyCode.objects.filter(
            session_id=session_id,
            phone=phone
        ).filter(~models.Q(code=''))
        if logs.exists():
            log = logs.last()
            return {
                'code' : log.code,
                'time' : log.send_time
            }
        return None
    @staticmethod
    def del_codes(session_id, phone):
        """Invalidate every code for (session_id, phone).

        Rows are kept; their ``code`` is blanked instead of the row being
        deleted. Returns True on success, False on invalid input.
        """
        if not isinstance(session_id, int):
            return False
        if UserInfoChecker.check_phone(phone) is not True:
            return False
        logs = VerifyCode.objects.filter(
            session_id=session_id,
            phone=phone
        )
        for log in logs:
            log.code = ''
            log.save()
        return True
    @staticmethod
    def add_code(session_id, phone, default_code='GUXYNB'):
        """Invalidate old codes, then issue a new one for (session_id, phone).

        Returns the issued code string, or ``None`` on invalid input.
        Pass ``default_code=None`` to generate a random 6-character code.
        """
        # NOTE(review): the fixed default 'GUXYNB' makes issued codes
        # predictable unless callers pass default_code=None — confirm this
        # default is only exercised in test/dev paths.
        if not VerifyHelper.del_codes(session_id, phone):
            return None
        if default_code is None:
            code = randkey(length=6)
        else:
            code = default_code
        VerifyCode(
            session_id=session_id,
            phone=phone,
            code=code,
            send_time=getdate_now()
        ).save()
        return code
    @staticmethod
    def check_code(session_id, phone, code):
        """Check *code* against (session_id, phone); consume it on success.

        A match blanks the stored code, so each issued code verifies at
        most once. Returns True on a match, False otherwise.
        """
        if not isinstance(session_id, int):
            return False
        if not isinstance(code, str) or code == '':
            return False
        if UserInfoChecker.check_phone(phone) is not True:
            return False
        logs = VerifyCode.objects.filter(
            session_id=session_id,
            phone=phone,
            code=code
        )
        if logs.exists():
            log = logs.last()
            # Consume: blank the code so the same value cannot be replayed.
            log.code = ''
            log.save()
            return True
        return False
| 26.714286 | 59 | 0.557576 | from django.db import models
from utils import getdate_now, randkey
from utils.checker import UserInfoChecker
class VerifyCode(models.Model):
session_id = models.IntegerField()
phone = models.CharField(max_length=11)
code = models.CharField(max_length=8)
send_time = models.DateTimeField()
class Meta:
verbose_name = 'verifycode'
verbose_name_plural = 'verifycodes'
get_latest_by = 'id'
class VerifyHelper:
@staticmethod
def get_latest_code(session_id, phone):
if not isinstance(session_id, int):
return None
if UserInfoChecker.check_phone(phone) is not True:
return None
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone
).filter(~models.Q(code=''))
if logs.exists():
log = logs.last()
return {
'code' : log.code,
'time' : log.send_time
}
return None
@staticmethod
def del_codes(session_id, phone):
if not isinstance(session_id, int):
return False
if UserInfoChecker.check_phone(phone) is not True:
return False
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone
)
for log in logs:
log.code = ''
log.save()
return True
@staticmethod
def add_code(session_id, phone, default_code='GUXYNB'):
if not VerifyHelper.del_codes(session_id, phone):
return None
if default_code is None:
code = randkey(length=6)
else:
code = default_code
VerifyCode(
session_id=session_id,
phone=phone,
code=code,
send_time=getdate_now()
).save()
return code
@staticmethod
def check_code(session_id, phone, code):
if not isinstance(session_id, int):
return False
if not isinstance(code, str) or code == '':
return False
if UserInfoChecker.check_phone(phone) is not True:
return False
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone,
code=code
)
if logs.exists():
log = logs.last()
log.code = ''
log.save()
return True
return False
| true | true |
f732ee5fee3b6ed7c4a9c5da6e42fa1b95cb49b1 | 1,200 | py | Python | aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/ListDegradeControlsRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/ListDegradeControlsRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/ListDegradeControlsRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class ListDegradeControlsRequest(RoaRequest):
	def __init__(self):
		"""Build a ListDegradeControls call against the EDAS ROA API (version 2017-08-01)."""
		RoaRequest.__init__(self, 'Edas', '2017-08-01', 'ListDegradeControls')
		self.set_uri_pattern('/pop/v5/app/degradeControls')
		self.set_method('GET')
	def get_AppId(self):
		"""Return the AppId query parameter, or None if it has not been set."""
		return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId) | 37.5 | 73 | 0.7625 |
from aliyunsdkcore.request import RoaRequest
class ListDegradeControlsRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'ListDegradeControls')
self.set_uri_pattern('/pop/v5/app/degradeControls')
self.set_method('GET')
def get_AppId(self):
return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId) | true | true |
f732f01c93fca354ce9cbdca8c18adaa7da7961b | 415,830 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/_models_py3.py | benbp/azure-sdk-for-python | 2329ba03e48098dcdc581898f6434d7c2b13a7b9 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/_models_py3.py | benbp/azure-sdk-for-python | 2329ba03e48098dcdc581898f6434d7c2b13a7b9 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/_models_py3.py | benbp/azure-sdk-for-python | 2329ba03e48098dcdc581898f6434d7c2b13a7b9 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AccessUri(msrest.serialization.Model):
    """A disk access SAS uri.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar access_sas: A SAS uri for accessing a disk.
    :vartype access_sas: str
    """

    # Server-populated only: the deserializer fills this in; requests never send it.
    _validation = {
        'access_sas': {'readonly': True},
    }

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'access_sas': {'key': 'accessSAS', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.access_sas = None
class AdditionalCapabilities(msrest.serialization.Model):
    """Enables or disables a capability on the virtual machine or virtual machine scale set.

    :param ultra_ssd_enabled: The flag that enables or disables a capability to have one or more
     managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks
     with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine
     scale set only if this property is enabled.
    :type ultra_ssd_enabled: bool
    """

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
    }

    def __init__(self, *, ultra_ssd_enabled: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
class AdditionalUnattendContent(msrest.serialization.Model):
    """Specifies additional XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the pass in which the content is applied.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar pass_name: The pass name. Currently, the only allowable value is OobeSystem. Default
     value: "OobeSystem".
    :vartype pass_name: str
    :ivar component_name: The component name. Currently, the only allowable value is Microsoft-
     Windows-Shell-Setup. Default value: "Microsoft-Windows-Shell-Setup".
    :vartype component_name: str
    :param setting_name: Specifies the name of the setting to which the content applies. Possible
     values are: FirstLogonCommands and AutoLogon. Possible values include: "AutoLogon",
     "FirstLogonCommands".
    :type setting_name: str or ~azure.mgmt.compute.v2019_03_01.models.SettingNames
    :param content: Specifies the XML formatted content that is added to the unattend.xml file for
     the specified path and component. The XML must be less than 4KB and must include the root
     element for the setting or feature that is being inserted.
    :type content: str
    """
    # 'constant' marks fields that always serialize with the fixed class-level value below.
    _validation = {
        'pass_name': {'constant': True},
        'component_name': {'constant': True},
    }
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'pass_name': {'key': 'passName', 'type': 'str'},
        'component_name': {'key': 'componentName', 'type': 'str'},
        'setting_name': {'key': 'settingName', 'type': 'str'},
        'content': {'key': 'content', 'type': 'str'},
    }
    # Class-level constants: the only values the service currently accepts.
    pass_name = "OobeSystem"
    component_name = "Microsoft-Windows-Shell-Setup"
    def __init__(
        self,
        *,
        setting_name: Optional[Union[str, "SettingNames"]] = None,
        content: Optional[str] = None,
        **kwargs
    ):
        super(AdditionalUnattendContent, self).__init__(**kwargs)
        self.setting_name = setting_name
        self.content = content
class ApiEntityReference(msrest.serialization.Model):
    """The API entity reference.

    :param id: The ARM resource id in the form of
     /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/...
    :type id: str
    """

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        # `id` shadows the builtin, but the keyword name comes from the REST
        # spec and is part of the public interface, so it stays.
        super().__init__(**kwargs)
        self.id = id
class ApiError(msrest.serialization.Model):
    """Api error.
    :param details: The Api error details.
    :type details: list[~azure.mgmt.compute.v2019_03_01.models.ApiErrorBase]
    :param innererror: The Api inner error.
    :type innererror: ~azure.mgmt.compute.v2019_03_01.models.InnerError
    :param code: The error code.
    :type code: str
    :param target: The target of the particular error.
    :type target: str
    :param message: The error message.
    :type message: str
    """
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'details': {'key': 'details', 'type': '[ApiErrorBase]'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        details: Optional[List["ApiErrorBase"]] = None,
        innererror: Optional["InnerError"] = None,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        super(ApiError, self).__init__(**kwargs)
        self.details = details
        self.innererror = innererror
        self.code = code
        self.target = target
        self.message = message
class ApiErrorBase(msrest.serialization.Model):
    """Api error base.

    :param code: The error code.
    :type code: str
    :param target: The target of the particular error.
    :type target: str
    :param message: The error message.
    :type message: str
    """

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, code: Optional[str] = None, target: Optional[str] = None, message: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.code = code
        self.target = target
        self.message = message
class AutomaticOSUpgradePolicy(msrest.serialization.Model):
    """The configuration parameters used for performing automatic OS upgrade.
    :param enable_automatic_os_upgrade: Indicates whether OS upgrades should automatically be
     applied to scale set instances in a rolling fashion when a newer version of the OS image
     becomes available. Default value is false. :code:`<br>`:code:`<br>` If this is set to true for
     Windows based scale sets, `enableAutomaticUpdates
     <https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-
     dotnet>`_ is automatically set to false and cannot be set to true.
    :type enable_automatic_os_upgrade: bool
    :param disable_automatic_rollback: Whether OS image rollback feature should be disabled.
     Default value is false.
    :type disable_automatic_rollback: bool
    """
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'},
        'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        enable_automatic_os_upgrade: Optional[bool] = None,
        disable_automatic_rollback: Optional[bool] = None,
        **kwargs
    ):
        super(AutomaticOSUpgradePolicy, self).__init__(**kwargs)
        self.enable_automatic_os_upgrade = enable_automatic_os_upgrade
        self.disable_automatic_rollback = disable_automatic_rollback
class AutomaticOSUpgradeProperties(msrest.serialization.Model):
    """Describes automatic OS upgrade properties on the image.

    All required parameters must be populated in order to send to Azure.

    :param automatic_os_upgrade_supported: Required. Specifies whether automatic OS upgrade is
     supported on the image.
    :type automatic_os_upgrade_supported: bool
    """

    # Required on the wire: serialization fails if the value is missing.
    _validation = {
        'automatic_os_upgrade_supported': {'required': True},
    }

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'automatic_os_upgrade_supported': {'key': 'automaticOSUpgradeSupported', 'type': 'bool'},
    }

    def __init__(self, *, automatic_os_upgrade_supported: bool, **kwargs):
        super().__init__(**kwargs)
        self.automatic_os_upgrade_supported = automatic_os_upgrade_supported
class AutomaticRepairsPolicy(msrest.serialization.Model):
    """Specifies the configuration parameters for automatic repairs on the virtual machine scale set.
    :param enabled: Specifies whether automatic repairs should be enabled on the virtual machine
     scale set. The default value is false.
    :type enabled: bool
    :param grace_period: The amount of time for which automatic repairs are suspended due to a
     state change on VM. The grace time starts after the state change has completed. This helps
     avoid premature or accidental repairs. The time duration should be specified in ISO 8601
     format. The minimum allowed grace period is 30 minutes (PT30M), which is also the default
     value. The maximum allowed grace period is 90 minutes (PT90M).
    :type grace_period: str
    """
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'grace_period': {'key': 'gracePeriod', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        grace_period: Optional[str] = None,
        **kwargs
    ):
        super(AutomaticRepairsPolicy, self).__init__(**kwargs)
        self.enabled = enabled
        self.grace_period = grace_period
class Resource(msrest.serialization.Model):
    """The Resource model definition.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """
    # id/name/type are assigned by the service; clients may only choose location/tags.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        # Read-only fields start as None and are filled in from service responses.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
class AvailabilitySet(Resource):
    """Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see `Manage the availability of virtual machines <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_. :code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Planned maintenance for virtual machines in Azure <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_ :code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: Sku of the availability set, only name is required to be set. See
     AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with
     managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is
     'Classic'.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :param platform_update_domain_count: Update Domain count.
    :type platform_update_domain_count: int
    :param platform_fault_domain_count: Fault Domain count.
    :type platform_fault_domain_count: int
    :param virtual_machines: A list of references to all virtual machines in the availability set.
    :type virtual_machines: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param proximity_placement_group: Specifies information about the proximity placement group
     that the availability set should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
     2018-04-01.
    :type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """
    # Inherited id/name/type plus statuses are server-populated; location is required.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'statuses': {'readonly': True},
    }
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        super(AvailabilitySet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; filled in from service responses.
        self.statuses = None
class AvailabilitySetListResult(msrest.serialization.Model):
    """The List Availability Set operation response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of availability sets.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.AvailabilitySet]
    :param next_link: The URI to fetch the next page of AvailabilitySets. Call ListNext() with this
     URI to fetch the next page of AvailabilitySets.
    :type next_link: str
    """

    # The page payload is mandatory; next_link is absent on the final page.
    _validation = {
        'value': {'required': True},
    }

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailabilitySet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["AvailabilitySet"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class UpdateResource(msrest.serialization.Model):
    """The Update Resource model definition.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
class AvailabilitySetUpdate(UpdateResource):
    """Specifies information about the availability set that the virtual machine should be assigned to. Only tags may be updated.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: Sku of the availability set.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :param platform_update_domain_count: Update Domain count.
    :type platform_update_domain_count: int
    :param platform_fault_domain_count: Fault Domain count.
    :type platform_fault_domain_count: int
    :param virtual_machines: A list of references to all virtual machines in the availability set.
    :type virtual_machines: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param proximity_placement_group: Specifies information about the proximity placement group
     that the availability set should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
     2018-04-01.
    :type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """
    # statuses is server-populated and never sent on updates.
    _validation = {
        'statuses': {'readonly': True},
    }
    # Attribute -> JSON wire key / msrest type, consumed by the (de)serializer.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        super(AvailabilitySetUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; filled in from service responses.
        self.statuses = None
class BillingProfile(msrest.serialization.Model):
    """Billing-related details of an Azure Spot VM or VMSS. Minimum api-version: 2019-03-01.

    :param max_price: The maximum price, in US Dollars, you are willing to pay for an Azure
     Spot VM/VMSS. The price is compared with the current Azure Spot price at create/update
     time and the operation only succeeds if maxPrice is greater than it; the VM/VMSS is
     also evicted if the current Spot price later rises above maxPrice. Allowed values are
     any decimal greater than zero (example: 0.01538), or -1, which means the price is
     capped at on-demand and the VM/VMSS is never evicted for price reasons. Defaults to
     -1 when not provided. Minimum api-version: 2019-03-01.
    :type max_price: float
    """

    _attribute_map = {'max_price': {'key': 'maxPrice', 'type': 'float'}}

    def __init__(self, *, max_price: Optional[float] = None, **kwargs):
        super(BillingProfile, self).__init__(**kwargs)
        self.max_price = max_price
class BootDiagnostics(msrest.serialization.Model):
    """Boot Diagnostics is a debugging feature that lets you view the VM's console output
    and a screenshot of the VM from the hypervisor to diagnose VM status.

    :param enabled: Whether boot diagnostics should be enabled on the Virtual Machine.
    :type enabled: bool
    :param storage_uri: Uri of the storage account used for placing the console output and
     screenshot.
    :type storage_uri: str
    """

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'storage_uri': {'key': 'storageUri', 'type': 'str'},
    }

    def __init__(self, *, enabled: Optional[bool] = None, storage_uri: Optional[str] = None, **kwargs):
        super(BootDiagnostics, self).__init__(**kwargs)
        # Plain data holder; both settings are optional on the wire.
        self.storage_uri = storage_uri
        self.enabled = enabled
class BootDiagnosticsInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine's boot diagnostics.

    Every attribute is populated by the server and ignored when sending a request.

    :ivar console_screenshot_blob_uri: The console screenshot blob URI.
    :vartype console_screenshot_blob_uri: str
    :ivar serial_console_log_blob_uri: The Linux serial console log blob Uri.
    :vartype serial_console_log_blob_uri: str
    :ivar status: The boot diagnostics status information for the VM. Only set when errors
     were encountered while enabling boot diagnostics.
    :vartype status: ~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus
    """

    _validation = {
        'console_screenshot_blob_uri': {'readonly': True},
        'serial_console_log_blob_uri': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
        'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, **kwargs):
        super(BootDiagnosticsInstanceView, self).__init__(**kwargs)
        # Read-only view: the deserializer fills these in from the service response.
        self.status = None
        self.serial_console_log_blob_uri = None
        self.console_screenshot_blob_uri = None
class ComputeOperationListResult(msrest.serialization.Model):
    """The List Compute Operation operation response.

    The single attribute is populated by the server and ignored when sending a request.

    :ivar value: The list of compute operations.
    :vartype value: list[~azure.mgmt.compute.v2019_03_01.models.ComputeOperationValue]
    """

    _validation = {'value': {'readonly': True}}

    _attribute_map = {'value': {'key': 'value', 'type': '[ComputeOperationValue]'}}

    def __init__(self, **kwargs):
        super(ComputeOperationListResult, self).__init__(**kwargs)
        self.value = None  # read-only; assigned during deserialization
class ComputeOperationValue(msrest.serialization.Model):
    """Describes the properties of a Compute Operation value.

    Every attribute is populated by the server and ignored when sending a request.

    :ivar origin: The origin of the compute operation.
    :vartype origin: str
    :ivar name: The name of the compute operation.
    :vartype name: str
    :ivar operation: The display name of the compute operation.
    :vartype operation: str
    :ivar resource: The display name of the resource the operation applies to.
    :vartype resource: str
    :ivar description: The description of the operation.
    :vartype description: str
    :ivar provider: The resource provider for the operation.
    :vartype provider: str
    """

    _validation = {
        'origin': {'readonly': True}, 'name': {'readonly': True},
        'operation': {'readonly': True}, 'resource': {'readonly': True},
        'description': {'readonly': True}, 'provider': {'readonly': True},
    }

    # The display.* keys flatten the nested "display" object returned by the service.
    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'operation': {'key': 'display.operation', 'type': 'str'},
        'resource': {'key': 'display.resource', 'type': 'str'},
        'description': {'key': 'display.description', 'type': 'str'},
        'provider': {'key': 'display.provider', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ComputeOperationValue, self).__init__(**kwargs)
        # All fields are read-only; the deserializer fills them from the response.
        self.provider = None
        self.description = None
        self.resource = None
        self.operation = None
        self.name = None
        self.origin = None
class CreationData(msrest.serialization.Model):
    """Data used when creating a disk.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param create_option: Required. This enumerates the possible sources of a disk's creation.
     Possible values include: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload".
    :type create_option: str or ~azure.mgmt.compute.v2019_03_01.models.DiskCreateOption
    :param storage_account_id: If createOption is Import, the Azure Resource Manager identifier of
     the storage account containing the blob to import as a disk. Required only if the blob is in a
     different subscription.
    :type storage_account_id: str
    :param image_reference: Disk source information.
    :type image_reference: ~azure.mgmt.compute.v2019_03_01.models.ImageDiskReference
    :param source_uri: If createOption is Import, this is the URI of a blob to be imported into a
     managed disk.
    :type source_uri: str
    :param source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot
     or disk.
    :type source_resource_id: str
    :ivar source_unique_id: If this field is set, this is the unique id identifying the source of
     this resource.
    :vartype source_unique_id: str
    :param upload_size_bytes: If createOption is Upload, this is the size of the contents of the
     upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for
     the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
    :type upload_size_bytes: long
    """

    # msrest validation rules: create_option must be supplied by the caller;
    # source_unique_id is service-populated and rejected on input.
    _validation = {
        'create_option': {'required': True},
        'source_unique_id': {'readonly': True},
    }

    # Maps Python attribute names to JSON wire keys and msrest type strings.
    _attribute_map = {
        'create_option': {'key': 'createOption', 'type': 'str'},
        'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
        'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
        'source_uri': {'key': 'sourceUri', 'type': 'str'},
        'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
        'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'},
        'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOption"],
        storage_account_id: Optional[str] = None,
        image_reference: Optional["ImageDiskReference"] = None,
        source_uri: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        upload_size_bytes: Optional[int] = None,
        **kwargs
    ):
        super(CreationData, self).__init__(**kwargs)
        self.create_option = create_option
        self.storage_account_id = storage_account_id
        self.image_reference = image_reference
        self.source_uri = source_uri
        self.source_resource_id = source_resource_id
        # Read-only; filled in by the service during deserialization.
        self.source_unique_id = None
        self.upload_size_bytes = upload_size_bytes
class DataDisk(msrest.serialization.Model):
    """Describes a data disk.

    All required parameters must be populated in order to send to Azure.

    :param lun: Required. Specifies the logical unit number of the data disk. This value is used to
     identify data disks within the VM and therefore must be unique for each data disk attached to a
     VM.
    :type lun: int
    :param name: The disk name.
    :type name: str
    :param vhd: The virtual hard disk.
    :type vhd: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param image: The source user image virtual hard disk. The virtual hard disk will be copied
     before being attached to the virtual machine. If SourceImage is provided, the destination
     virtual hard drive must not exist.
    :type image: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param caching: Specifies the caching requirements. Possible values are: **None**,
     **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium
     storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or
     disabled on the disk.
    :type write_accelerator_enabled: bool
    :param create_option: Required. Specifies how the virtual machine should be created. Possible
     values are: **Attach** \u2013 This value is used when you are using a specialized disk to
     create the virtual machine. **FromImage** \u2013 This value is used when you are using an
     image to create the virtual machine. If you are using a platform image, you also use the
     imageReference element described above. If you are using a marketplace image, you also use the
     plan element previously described. Possible values include: "FromImage", "Empty", "Attach".
    :type create_option: str or ~azure.mgmt.compute.v2019_03_01.models.DiskCreateOptionTypes
    :param disk_size_gb: Specifies the size of an empty data disk in gigabytes. This element can be
     used to overwrite the size of the disk in a virtual machine image. This value cannot be larger
     than 1023 GB.
    :type disk_size_gb: int
    :param managed_disk: The managed disk parameters.
    :type managed_disk: ~azure.mgmt.compute.v2019_03_01.models.ManagedDiskParameters
    :param to_be_detached: Specifies whether the data disk is in process of detachment from the
     VirtualMachine/VirtualMachineScaleset.
    :type to_be_detached: bool
    """

    # Only lun and create_option are mandatory on the wire; everything else is optional.
    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys and msrest type strings.
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'to_be_detached': {'key': 'toBeDetached', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        to_be_detached: Optional[bool] = None,
        **kwargs
    ):
        super(DataDisk, self).__init__(**kwargs)
        self.lun = lun
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.to_be_detached = to_be_detached
class DataDiskImage(msrest.serialization.Model):
    """Contains the data disk images information.

    :ivar lun: The logical unit number of the data disk; used to identify data disks
     within the VM and therefore unique for each data disk attached to a VM. Populated
     by the server and ignored when sending a request.
    :vartype lun: int
    """

    _validation = {'lun': {'readonly': True}}

    _attribute_map = {'lun': {'key': 'lun', 'type': 'int'}}

    def __init__(self, **kwargs):
        super(DataDiskImage, self).__init__(**kwargs)
        self.lun = None  # read-only; assigned during deserialization
class DedicatedHost(Resource):
    """Specifies information about the Dedicated host.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: Required. SKU of the dedicated host for Hardware Generation and VM family. Only
     name is required to be set. List Microsoft.Compute SKUs for a list of possible values.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :param platform_fault_domain: Fault domain of the dedicated host within a dedicated host group.
    :type platform_fault_domain: int
    :param auto_replace_on_failure: Specifies whether the dedicated host should be replaced
     automatically in case of a failure. The value is defaulted to 'true' when not provided.
    :type auto_replace_on_failure: bool
    :ivar host_id: A unique id generated and assigned to the dedicated host by the platform.
     Does not change throughout the lifetime of the host.
    :vartype host_id: str
    :ivar virtual_machines: A list of references to all virtual machines in the Dedicated Host.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2019_03_01.models.SubResourceReadOnly]
    :param license_type: Specifies the software license type that will be applied to the VMs
     deployed on the dedicated host. Possible values are: **None**, **Windows_Server_Hybrid**,
     **Windows_Server_Perpetual**. Default: **None**. Possible values include: "None",
     "Windows_Server_Hybrid", "Windows_Server_Perpetual".
    :type license_type: str or ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostLicenseTypes
    :ivar provisioning_time: The date when the host was first provisioned.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The dedicated host instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostInstanceView
    """

    # location and sku are mandatory; platform_fault_domain must lie in [0, 2];
    # the remaining constrained fields are service-populated and read-only.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'platform_fault_domain': {'maximum': 2, 'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    # Maps Python attribute names to (possibly nested) JSON wire keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
    }

    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        super(DedicatedHost, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        # Read-only; populated by the service during deserialization.
        self.host_id = None
        self.virtual_machines = None
        self.license_type = license_type
        # Read-only; populated by the service during deserialization.
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
class DedicatedHostAllocatableVM(msrest.serialization.Model):
    """Unutilized capacity of a dedicated host, expressed in terms of one specific VM size.

    :param vm_size: VM size in terms of which the unutilized capacity is represented.
    :type vm_size: str
    :param count: Maximum number of VMs of size vmSize that can fit in the dedicated
     host's remaining capacity.
    :type count: float
    """

    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'count': {'key': 'count', 'type': 'float'},
    }

    def __init__(self, *, vm_size: Optional[str] = None, count: Optional[float] = None, **kwargs):
        super(DedicatedHostAllocatableVM, self).__init__(**kwargs)
        self.count = count
        self.vm_size = vm_size
class DedicatedHostAvailableCapacity(msrest.serialization.Model):
    """Dedicated host unutilized capacity.

    :param allocatable_v_ms: The unutilized capacity of the dedicated host, expressed in
     terms of each VM size that is allowed to be deployed to the dedicated host.
    :type allocatable_v_ms: list[~azure.mgmt.compute.v2019_03_01.models.DedicatedHostAllocatableVM]
    """

    _attribute_map = {
        'allocatable_v_ms': {'key': 'allocatableVMs', 'type': '[DedicatedHostAllocatableVM]'},
    }

    def __init__(self, *, allocatable_v_ms: Optional[List["DedicatedHostAllocatableVM"]] = None, **kwargs):
        super(DedicatedHostAvailableCapacity, self).__init__(**kwargs)
        self.allocatable_v_ms = allocatable_v_ms
class DedicatedHostGroup(Resource):
    """Specifies information about the dedicated host group that the dedicated hosts should be assigned to. :code:`<br>`:code:`<br>` Currently, a dedicated host can only be added to a dedicated host group at creation time. An existing dedicated host cannot be added to another dedicated host group.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param zones: Availability Zone to use for this host group. Only single zone is supported. The
     zone can be assigned only during creation. If not provided, the group supports all zones in the
     region. If provided, enforces each host in the group to be in the same zone.
    :type zones: list[str]
    :param platform_fault_domain_count: Number of fault domains that the host group can span.
    :type platform_fault_domain_count: int
    :ivar hosts: A list of references to all dedicated hosts in the dedicated host group.
    :vartype hosts: list[~azure.mgmt.compute.v2019_03_01.models.SubResourceReadOnly]
    """

    # location is mandatory; platform_fault_domain_count must be in [1, 3];
    # hosts is service-populated and read-only.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'platform_fault_domain_count': {'maximum': 3, 'minimum': 1},
        'hosts': {'readonly': True},
    }

    # Maps Python attribute names to (possibly nested) JSON wire keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        **kwargs
    ):
        super(DedicatedHostGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Read-only; populated by the service during deserialization.
        self.hosts = None
class DedicatedHostGroupListResult(msrest.serialization.Model):
    """The List Dedicated Host Group with resource group response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of dedicated host groups.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroup]
    :param next_link: The URI of the next page of Dedicated Host Groups; pass it to
     ListNext() to fetch that page.
    :type next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHostGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["DedicatedHostGroup"], next_link: Optional[str] = None, **kwargs):
        super(DedicatedHostGroupListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class DedicatedHostGroupUpdate(UpdateResource):
    """Specifies information about the dedicated host group that the dedicated host should be assigned to. Only tags may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param zones: Availability Zone to use for this host group. Only single zone is supported. The
     zone can be assigned only during creation. If not provided, the group supports all zones in the
     region. If provided, enforces each host in the group to be in the same zone.
    :type zones: list[str]
    :param platform_fault_domain_count: Number of fault domains that the host group can span.
    :type platform_fault_domain_count: int
    :ivar hosts: A list of references to all dedicated hosts in the dedicated host group.
    :vartype hosts: list[~azure.mgmt.compute.v2019_03_01.models.SubResourceReadOnly]
    """

    # platform_fault_domain_count must be in [1, 3]; hosts is read-only.
    _validation = {
        'platform_fault_domain_count': {'maximum': 3, 'minimum': 1},
        'hosts': {'readonly': True},
    }

    # Maps Python attribute names to (possibly nested) JSON wire keys and msrest type strings.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        **kwargs
    ):
        super(DedicatedHostGroupUpdate, self).__init__(tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Read-only; populated by the service during deserialization.
        self.hosts = None
class DedicatedHostInstanceView(msrest.serialization.Model):
    """The instance view of a dedicated host.

    The asset_id attribute is populated by the server and ignored when sending a request.

    :ivar asset_id: The unique id of the dedicated physical machine on which the dedicated
     host resides.
    :vartype asset_id: str
    :param available_capacity: Unutilized capacity of the dedicated host.
    :type available_capacity: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostAvailableCapacity
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _validation = {'asset_id': {'readonly': True}}

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(DedicatedHostInstanceView, self).__init__(**kwargs)
        self.statuses = statuses
        self.available_capacity = available_capacity
        self.asset_id = None  # read-only; assigned during deserialization
class DedicatedHostListResult(msrest.serialization.Model):
    """The list dedicated host operation response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of dedicated hosts.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.DedicatedHost]
    :param next_link: The URI of the next page of dedicated hosts; pass it to ListNext()
     to fetch that page.
    :type next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHost]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["DedicatedHost"], next_link: Optional[str] = None, **kwargs):
        super(DedicatedHostListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class DedicatedHostUpdate(UpdateResource):
    """Specifies information about the dedicated host. Only tags, autoReplaceOnFailure and licenseType may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param platform_fault_domain: Fault domain of the dedicated host within a dedicated host group.
    :type platform_fault_domain: int
    :param auto_replace_on_failure: Specifies whether the dedicated host should be replaced
     automatically in case of a failure. The value is defaulted to 'true' when not provided.
    :type auto_replace_on_failure: bool
    :ivar host_id: A unique id generated and assigned to the dedicated host by the platform.
     Does not change throughout the lifetime of the host.
    :vartype host_id: str
    :ivar virtual_machines: A list of references to all virtual machines in the Dedicated Host.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2019_03_01.models.SubResourceReadOnly]
    :param license_type: Specifies the software license type that will be applied to the VMs
     deployed on the dedicated host. Possible values are: **None**, **Windows_Server_Hybrid**,
     **Windows_Server_Perpetual**. Default: **None**. Possible values include: "None",
     "Windows_Server_Hybrid", "Windows_Server_Perpetual".
    :type license_type: str or ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostLicenseTypes
    :ivar provisioning_time: The date when the host was first provisioned.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The dedicated host instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostInstanceView
    """

    # platform_fault_domain must lie in [0, 2]; the remaining constrained fields are
    # service-populated and read-only.
    _validation = {
        'platform_fault_domain': {'maximum': 2, 'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    # Maps Python attribute names to (possibly nested) JSON wire keys and msrest type strings.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        super(DedicatedHostUpdate, self).__init__(tags=tags, **kwargs)
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        # Read-only; populated by the service during deserialization.
        self.host_id = None
        self.virtual_machines = None
        self.license_type = license_type
        # Read-only; populated by the service during deserialization.
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
class DiagnosticsProfile(msrest.serialization.Model):
    """Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.

    :param boot_diagnostics: Boot Diagnostics is a debugging feature that lets you view
     the VM's console output and a screenshot of the VM from the hypervisor to diagnose
     VM status.
    :type boot_diagnostics: ~azure.mgmt.compute.v2019_03_01.models.BootDiagnostics
    """

    _attribute_map = {'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnostics'}}

    def __init__(self, *, boot_diagnostics: Optional["BootDiagnostics"] = None, **kwargs):
        super(DiagnosticsProfile, self).__init__(**kwargs)
        self.boot_diagnostics = boot_diagnostics
class DiffDiskSettings(msrest.serialization.Model):
    """Parameters of ephemeral disk settings that can be specified for an operating system
    disk. NOTE: ephemeral disk settings can only be specified for managed disk.

    :param option: Specifies the ephemeral disk settings for the operating system disk.
     Possible values include: "Local".
    :type option: str or ~azure.mgmt.compute.v2019_03_01.models.DiffDiskOptions
    """

    _attribute_map = {'option': {'key': 'option', 'type': 'str'}}

    def __init__(self, *, option: Optional[Union[str, "DiffDiskOptions"]] = None, **kwargs):
        super(DiffDiskSettings, self).__init__(**kwargs)
        self.option = option
class Disallowed(msrest.serialization.Model):
    """Describes the disallowed disk types.

    :param disk_types: A list of disk types.
    :type disk_types: list[str]
    """

    _attribute_map = {'disk_types': {'key': 'diskTypes', 'type': '[str]'}}

    def __init__(self, *, disk_types: Optional[List[str]] = None, **kwargs):
        super(Disallowed, self).__init__(**kwargs)
        self.disk_types = disk_types
class Disk(Resource):
    """Disk resource.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar managed_by: A relative URI containing the ID of the VM that has the disk attached.
    :vartype managed_by: str
    :param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
     UltraSSD_LRS.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.DiskSku
    :param zones: The Logical zone list for Disk.
    :type zones: list[str]
    :ivar time_created: The time when the disk was created.
    :vartype time_created: ~datetime.datetime
    :param os_type: The Operating System type. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to
     OS disks only. Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGeneration
    :param creation_data: Disk source information. CreationData information cannot be changed
     after the disk has been created.
    :type creation_data: ~azure.mgmt.compute.v2019_03_01.models.CreationData
    :param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
     indicates the size of the disk to create. If this field is present for updates or creation
     with other options, it indicates a resize. Resizes are only allowed if the disk is not
     attached to a running VM, and can only increase the disk's size.
    :type disk_size_gb: int
    :ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
    :vartype disk_size_bytes: long
    :ivar unique_id: Unique Guid identifying the resource.
    :vartype unique_id: str
    :param encryption_settings_collection: Encryption settings collection used for Azure Disk
     Encryption, can contain multiple encryption settings per disk or snapshot.
    :type encryption_settings_collection:
     ~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsCollection
    :ivar provisioning_state: The disk provisioning state.
    :vartype provisioning_state: str
    :param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
     UltraSSD disks. One operation can transfer between 4k and 256k bytes.
    :type disk_iops_read_write: long
    :param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
     disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers
     of 10.
    :type disk_m_bps_read_write: int
    :ivar disk_state: The state of the disk. Possible values include: "Unattached", "Attached",
     "Reserved", "ActiveSAS", "ReadyToUpload", "ActiveUpload".
    :vartype disk_state: str or ~azure.mgmt.compute.v2019_03_01.models.DiskState
    """

    # Server-populated (readonly) and required fields enforced by msrest.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "managed_by": {"readonly": True},
        "time_created": {"readonly": True},
        "disk_size_bytes": {"readonly": True},
        "unique_id": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "disk_state": {"readonly": True},
    }

    # Wire-name and serialized-type mapping; nested fields live under "properties.".
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "managed_by": {"key": "managedBy", "type": "str"},
        "sku": {"key": "sku", "type": "DiskSku"},
        "zones": {"key": "zones", "type": "[str]"},
        "time_created": {"key": "properties.timeCreated", "type": "iso-8601"},
        "os_type": {"key": "properties.osType", "type": "str"},
        "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"},
        "creation_data": {"key": "properties.creationData", "type": "CreationData"},
        "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"},
        "disk_size_bytes": {"key": "properties.diskSizeBytes", "type": "long"},
        "unique_id": {"key": "properties.uniqueId", "type": "str"},
        "encryption_settings_collection": {"key": "properties.encryptionSettingsCollection", "type": "EncryptionSettingsCollection"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "disk_iops_read_write": {"key": "properties.diskIOPSReadWrite", "type": "long"},
        "disk_m_bps_read_write": {"key": "properties.diskMBpsReadWrite", "type": "int"},
        "disk_state": {"key": "properties.diskState", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["DiskSku"] = None,
        zones: Optional[List[str]] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
        creation_data: Optional["CreationData"] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        # Caller-supplied (settable) properties.
        self.sku = sku
        self.zones = zones
        self.os_type = os_type
        self.hyper_v_generation = hyper_v_generation
        self.creation_data = creation_data
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
        # Server-populated fields start out as None and are filled on deserialization.
        self.managed_by = None
        self.time_created = None
        self.disk_size_bytes = None
        self.unique_id = None
        self.provisioning_state = None
        self.disk_state = None
class DiskEncryptionSettings(msrest.serialization.Model):
    """Encryption settings for a disk.

    :param disk_encryption_key: Specifies the location of the disk encryption key, which is a
     Key Vault Secret.
    :type disk_encryption_key: ~azure.mgmt.compute.v2019_03_01.models.KeyVaultSecretReference
    :param key_encryption_key: Specifies the location of the key encryption key in Key Vault.
    :type key_encryption_key: ~azure.mgmt.compute.v2019_03_01.models.KeyVaultKeyReference
    :param enabled: Specifies whether disk encryption should be enabled on the virtual machine.
    :type enabled: bool
    """

    _attribute_map = {
        "disk_encryption_key": {"key": "diskEncryptionKey", "type": "KeyVaultSecretReference"},
        "key_encryption_key": {"key": "keyEncryptionKey", "type": "KeyVaultKeyReference"},
        "enabled": {"key": "enabled", "type": "bool"},
    }

    def __init__(
        self,
        *,
        disk_encryption_key: Optional["KeyVaultSecretReference"] = None,
        key_encryption_key: Optional["KeyVaultKeyReference"] = None,
        enabled: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
        self.enabled = enabled
class DiskInstanceView(msrest.serialization.Model):
    """Instance view of a disk.

    :param name: The disk name.
    :type name: str
    :param encryption_settings: Specifies the encryption settings for the OS Disk. Minimum
     api-version: 2015-06-15.
    :type encryption_settings: list[~azure.mgmt.compute.v2019_03_01.models.DiskEncryptionSettings]
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "encryption_settings": {"key": "encryptionSettings", "type": "[DiskEncryptionSettings]"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        encryption_settings: Optional[List["DiskEncryptionSettings"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.encryption_settings = encryption_settings
        self.statuses = statuses
class DiskList(msrest.serialization.Model):
    """Response of the List Disks operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of disks.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.Disk]
    :param next_link: The uri to fetch the next page of disks. Call ListNext() with this to
     fetch the next page of disks.
    :type next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Disk]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["Disk"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DiskSku(msrest.serialization.Model):
    """The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
     "StandardSSD_LRS", "UltraSSD_LRS".
    :type name: str or ~azure.mgmt.compute.v2019_03_01.models.DiskStorageAccountTypes
    :ivar tier: The sku tier.
    :vartype tier: str
    """

    _validation = {
        "tier": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
    }

    def __init__(self, *, name: Optional[Union[str, "DiskStorageAccountTypes"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        # Read-only; filled in by the service on deserialization.
        self.tier = None
class DiskUpdate(msrest.serialization.Model):
    """Patch body for updating a disk resource.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
     UltraSSD_LRS.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.DiskSku
    :param os_type: the Operating System type. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
     indicates the size of the disk to create. If this field is present for updates or creation
     with other options, it indicates a resize. Resizes are only allowed if the disk is not
     attached to a running VM, and can only increase the disk's size.
    :type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used be Azure Disk
     Encryption, can contain multiple encryption settings per disk or snapshot.
    :type encryption_settings_collection:
     ~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsCollection
    :param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
     UltraSSD disks. One operation can transfer between 4k and 256k bytes.
    :type disk_iops_read_write: long
    :param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
     disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers
     of 10.
    :type disk_m_bps_read_write: int
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "sku": {"key": "sku", "type": "DiskSku"},
        "os_type": {"key": "properties.osType", "type": "str"},
        "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"},
        "encryption_settings_collection": {"key": "properties.encryptionSettingsCollection", "type": "EncryptionSettingsCollection"},
        "disk_iops_read_write": {"key": "properties.diskIOPSReadWrite", "type": "long"},
        "disk_m_bps_read_write": {"key": "properties.diskMBpsReadWrite", "type": "int"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["DiskSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tags = tags
        self.sku = sku
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
class EncryptionSettingsCollection(msrest.serialization.Model):
    """Collection of encryption settings for a disk or snapshot.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Set this flag to true and provide DiskEncryptionKey and optional
     KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey
     and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request
     object, the existing settings remain unchanged.
    :type enabled: bool
    :param encryption_settings: A collection of encryption settings, one for each disk volume.
    :type encryption_settings:
     list[~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsElement]
    :param encryption_settings_version: Describes what type of encryption is used for the disks.
     Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
     with AAD app.'1.1' corresponds to Azure Disk Encryption.
    :type encryption_settings_version: str
    """

    _validation = {
        "enabled": {"required": True},
    }

    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "encryption_settings": {"key": "encryptionSettings", "type": "[EncryptionSettingsElement]"},
        "encryption_settings_version": {"key": "encryptionSettingsVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        enabled: bool,
        encryption_settings: Optional[List["EncryptionSettingsElement"]] = None,
        encryption_settings_version: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.enabled = enabled
        self.encryption_settings = encryption_settings
        self.encryption_settings_version = encryption_settings_version
class EncryptionSettingsElement(msrest.serialization.Model):
    """Encryption settings for a single disk volume.

    :param disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key.
    :type disk_encryption_key: ~azure.mgmt.compute.v2019_03_01.models.KeyVaultAndSecretReference
    :param key_encryption_key: Key Vault Key Url and vault id of the key encryption key.
     KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
    :type key_encryption_key: ~azure.mgmt.compute.v2019_03_01.models.KeyVaultAndKeyReference
    """

    _attribute_map = {
        "disk_encryption_key": {"key": "diskEncryptionKey", "type": "KeyVaultAndSecretReference"},
        "key_encryption_key": {"key": "keyEncryptionKey", "type": "KeyVaultAndKeyReference"},
    }

    def __init__(
        self,
        *,
        disk_encryption_key: Optional["KeyVaultAndSecretReference"] = None,
        key_encryption_key: Optional["KeyVaultAndKeyReference"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
class Gallery(Resource):
    """Information about a Shared Image Gallery to create or update.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param description: The description of this Shared Image Gallery resource. This property is
     updatable.
    :type description: str
    :param identifier: Describes the gallery unique name.
    :type identifier: ~azure.mgmt.compute.v2019_03_01.models.GalleryIdentifier
    :ivar provisioning_state: The provisioning state, which only appears in the response.
     Possible values include: "Creating", "Updating", "Failed", "Succeeded", "Deleting",
     "Migrating".
    :vartype provisioning_state: str or
     ~azure.mgmt.compute.v2019_03_01.models.GalleryPropertiesProvisioningState
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "description": {"key": "properties.description", "type": "str"},
        "identifier": {"key": "properties.identifier", "type": "GalleryIdentifier"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        identifier: Optional["GalleryIdentifier"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.description = description
        self.identifier = identifier
        # Read-only; reported by the service only.
        self.provisioning_state = None
class GalleryApplication(Resource):
    """Information about a gallery Application Definition to create or update.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param description: The description of this gallery Application Definition resource. This
     property is updatable.
    :type description: str
    :param eula: The Eula agreement for the gallery Application Definition.
    :type eula: str
    :param privacy_statement_uri: The privacy statement uri.
    :type privacy_statement_uri: str
    :param release_note_uri: The release note uri.
    :type release_note_uri: str
    :param end_of_life_date: The end of life date of the gallery Application Definition. This
     property can be used for decommissioning purposes. This property is updatable.
    :type end_of_life_date: ~datetime.datetime
    :param supported_os_type: This property allows you to specify the supported type of the OS
     that application is built for. Possible values are: **Windows**, **Linux**. Possible values
     include: "Windows", "Linux".
    :type supported_os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "description": {"key": "properties.description", "type": "str"},
        "eula": {"key": "properties.eula", "type": "str"},
        "privacy_statement_uri": {"key": "properties.privacyStatementUri", "type": "str"},
        "release_note_uri": {"key": "properties.releaseNoteUri", "type": "str"},
        "end_of_life_date": {"key": "properties.endOfLifeDate", "type": "iso-8601"},
        "supported_os_type": {"key": "properties.supportedOSType", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        eula: Optional[str] = None,
        privacy_statement_uri: Optional[str] = None,
        release_note_uri: Optional[str] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        supported_os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.description = description
        self.eula = eula
        self.privacy_statement_uri = privacy_statement_uri
        self.release_note_uri = release_note_uri
        self.end_of_life_date = end_of_life_date
        self.supported_os_type = supported_os_type
class GalleryApplicationList(msrest.serialization.Model):
    """Response of the List Gallery Applications operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of Gallery Applications.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.GalleryApplication]
    :param next_link: The uri to fetch the next page of Application Definitions in the
     Application Gallery. Call ListNext() with this to fetch the next page of gallery
     Application Definitions.
    :type next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[GalleryApplication]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["GalleryApplication"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryApplicationVersion(Resource):
    """Information about a gallery Application Version to create or update.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param publishing_profile: The publishing profile of a gallery image version.
    :type publishing_profile:
     ~azure.mgmt.compute.v2019_03_01.models.GalleryApplicationVersionPublishingProfile
    :ivar provisioning_state: The provisioning state, which only appears in the response.
     Possible values include: "Creating", "Updating", "Failed", "Succeeded", "Deleting",
     "Migrating".
    :vartype provisioning_state: str or
     ~azure.mgmt.compute.v2019_03_01.models.GalleryApplicationVersionPropertiesProvisioningState
    :ivar replication_status: This is the replication status of the gallery Image Version.
    :vartype replication_status: ~azure.mgmt.compute.v2019_03_01.models.ReplicationStatus
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
        "replication_status": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "publishing_profile": {"key": "properties.publishingProfile", "type": "GalleryApplicationVersionPublishingProfile"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "replication_status": {"key": "properties.replicationStatus", "type": "ReplicationStatus"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        publishing_profile: Optional["GalleryApplicationVersionPublishingProfile"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.publishing_profile = publishing_profile
        # Read-only; reported by the service only.
        self.provisioning_state = None
        self.replication_status = None
class GalleryApplicationVersionList(msrest.serialization.Model):
    """Response of the List Gallery Application version operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of gallery Application Versions.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.GalleryApplicationVersion]
    :param next_link: The uri to fetch the next page of gallery Application Versions. Call
     ListNext() with this to fetch the next page of gallery Application Versions.
    :type next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[GalleryApplicationVersion]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["GalleryApplicationVersion"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryArtifactPublishingProfileBase(msrest.serialization.Model):
    """Basic gallery artifact publishing profile.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param target_regions: The target regions where the Image Version is going to be replicated
     to. This property is updatable.
    :type target_regions: list[~azure.mgmt.compute.v2019_03_01.models.TargetRegion]
    :param replica_count: The number of replicas of the Image Version to be created per region.
     This property would take effect for a region when regionalReplicaCount is not specified.
     This property is updatable.
    :type replica_count: int
    :param exclude_from_latest: If set to true, Virtual Machines deployed from the latest
     version of the Image Definition won't use this Image Version.
    :type exclude_from_latest: bool
    :ivar published_date: The timestamp for when the gallery Image Version is published.
    :vartype published_date: ~datetime.datetime
    :param end_of_life_date: The end of life date of the gallery Image Version. This property
     can be used for decommissioning purposes. This property is updatable.
    :type end_of_life_date: ~datetime.datetime
    :param storage_account_type: Specifies the storage account type to be used to store the
     image. This property is not updatable. Possible values include: "Standard_LRS",
     "Standard_ZRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountType
    """

    _validation = {
        "published_date": {"readonly": True},
    }

    _attribute_map = {
        "target_regions": {"key": "targetRegions", "type": "[TargetRegion]"},
        "replica_count": {"key": "replicaCount", "type": "int"},
        "exclude_from_latest": {"key": "excludeFromLatest", "type": "bool"},
        "published_date": {"key": "publishedDate", "type": "iso-8601"},
        "end_of_life_date": {"key": "endOfLifeDate", "type": "iso-8601"},
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
    }

    def __init__(
        self,
        *,
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.target_regions = target_regions
        self.replica_count = replica_count
        self.exclude_from_latest = exclude_from_latest
        self.end_of_life_date = end_of_life_date
        self.storage_account_type = storage_account_type
        # Read-only; set by the service when the version is published.
        self.published_date = None
class GalleryApplicationVersionPublishingProfile(GalleryArtifactPublishingProfileBase):
    """Publishing profile of a gallery image version.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :param target_regions: The target regions where the Image Version is going to be replicated
     to. This property is updatable.
    :type target_regions: list[~azure.mgmt.compute.v2019_03_01.models.TargetRegion]
    :param replica_count: The number of replicas of the Image Version to be created per region.
     This property would take effect for a region when regionalReplicaCount is not specified.
     This property is updatable.
    :type replica_count: int
    :param exclude_from_latest: If set to true, Virtual Machines deployed from the latest
     version of the Image Definition won't use this Image Version.
    :type exclude_from_latest: bool
    :ivar published_date: The timestamp for when the gallery Image Version is published.
    :vartype published_date: ~datetime.datetime
    :param end_of_life_date: The end of life date of the gallery Image Version. This property
     can be used for decommissioning purposes. This property is updatable.
    :type end_of_life_date: ~datetime.datetime
    :param storage_account_type: Specifies the storage account type to be used to store the
     image. This property is not updatable. Possible values include: "Standard_LRS",
     "Standard_ZRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountType
    :param source: Required. The source image from which the Image Version is going to be
     created.
    :type source: ~azure.mgmt.compute.v2019_03_01.models.UserArtifactSource
    :param manage_actions:
    :type manage_actions: ~azure.mgmt.compute.v2019_03_01.models.UserArtifactManage
    :param enable_health_check: Optional. Whether or not this application reports health.
    :type enable_health_check: bool
    """

    _validation = {
        "published_date": {"readonly": True},
        "source": {"required": True},
    }

    _attribute_map = {
        "target_regions": {"key": "targetRegions", "type": "[TargetRegion]"},
        "replica_count": {"key": "replicaCount", "type": "int"},
        "exclude_from_latest": {"key": "excludeFromLatest", "type": "bool"},
        "published_date": {"key": "publishedDate", "type": "iso-8601"},
        "end_of_life_date": {"key": "endOfLifeDate", "type": "iso-8601"},
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
        "source": {"key": "source", "type": "UserArtifactSource"},
        "manage_actions": {"key": "manageActions", "type": "UserArtifactManage"},
        "enable_health_check": {"key": "enableHealthCheck", "type": "bool"},
    }

    def __init__(
        self,
        *,
        source: "UserArtifactSource",
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        manage_actions: Optional["UserArtifactManage"] = None,
        enable_health_check: Optional[bool] = None,
        **kwargs
    ):
        # Base class owns the replication/publishing fields; this subclass adds the source image.
        super().__init__(
            target_regions=target_regions,
            replica_count=replica_count,
            exclude_from_latest=exclude_from_latest,
            end_of_life_date=end_of_life_date,
            storage_account_type=storage_account_type,
            **kwargs
        )
        self.source = source
        self.manage_actions = manage_actions
        self.enable_health_check = enable_health_check
class GalleryArtifactSource(msrest.serialization.Model):
    """Source image from which the Image Version is going to be created.

    All required parameters must be populated in order to send to Azure.

    :param managed_image: Required. The managed artifact.
    :type managed_image: ~azure.mgmt.compute.v2019_03_01.models.ManagedArtifact
    """

    _validation = {
        "managed_image": {"required": True},
    }

    _attribute_map = {
        "managed_image": {"key": "managedImage", "type": "ManagedArtifact"},
    }

    def __init__(self, *, managed_image: "ManagedArtifact", **kwargs):
        super().__init__(**kwargs)
        self.managed_image = managed_image
class GalleryDiskImage(msrest.serialization.Model):
    """Base class for a disk image.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar size_in_gb: This property indicates the size of the VHD to be created.
    :vartype size_in_gb: int
    :ivar host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and
     'ReadWrite'. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype host_caching: str or ~azure.mgmt.compute.v2019_03_01.models.HostCaching
    """

    # Both fields are server-populated only.
    _validation = {
        "size_in_gb": {"readonly": True},
        "host_caching": {"readonly": True},
    }

    _attribute_map = {
        "size_in_gb": {"key": "sizeInGB", "type": "int"},
        "host_caching": {"key": "hostCaching", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.size_in_gb = None
        self.host_caching = None
class GalleryDataDiskImage(GalleryDiskImage):
    """A data disk image inside a gallery Image Version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar size_in_gb: This property indicates the size of the VHD to be created.
    :vartype size_in_gb: int
    :ivar host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and
     'ReadWrite'. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype host_caching: str or ~azure.mgmt.compute.v2019_03_01.models.HostCaching
    :ivar lun: Logical unit number of the data disk; unique per data disk attached to a
     Virtual Machine.
    :vartype lun: int
    """

    _validation = {
        "size_in_gb": {"readonly": True},
        "host_caching": {"readonly": True},
        "lun": {"readonly": True},
    }

    _attribute_map = {
        "size_in_gb": {"key": "sizeInGB", "type": "int"},
        "host_caching": {"key": "hostCaching", "type": "str"},
        "lun": {"key": "lun", "type": "int"},
    }

    def __init__(self, **kwargs):
        # size_in_gb / host_caching are initialized by the base class.
        super(GalleryDataDiskImage, self).__init__(**kwargs)
        self.lun = None  # read-only, server-populated
class GalleryIdentifier(msrest.serialization.Model):
    """Carries the unique name of a Shared Image Gallery.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar unique_name: The unique name of the Shared Image Gallery. This name is generated
     automatically by Azure.
    :vartype unique_name: str
    """

    _validation = {"unique_name": {"readonly": True}}

    _attribute_map = {
        "unique_name": {"key": "uniqueName", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(GalleryIdentifier, self).__init__(**kwargs)
        self.unique_name = None  # read-only, generated by Azure
class GalleryImage(Resource):
    """A gallery Image Definition resource (create/update payload).

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param description: The description of this gallery Image Definition resource; updatable.
    :type description: str
    :param eula: The Eula agreement for the gallery Image Definition.
    :type eula: str
    :param privacy_statement_uri: The privacy statement uri.
    :type privacy_statement_uri: str
    :param release_note_uri: The release note uri.
    :type release_note_uri: str
    :param os_type: Type of the OS included in the disk when creating a VM from a managed
     image. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param os_state: Whether virtual machines created under this image are 'Generalized' or
     'Specialized'. Possible values include: "Generalized", "Specialized".
    :type os_state: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemStateTypes
    :param end_of_life_date: End-of-life date of the gallery Image Definition, used for
     decommissioning purposes; updatable.
    :type end_of_life_date: ~datetime.datetime
    :param identifier: This is the gallery Image Definition identifier.
    :type identifier: ~azure.mgmt.compute.v2019_03_01.models.GalleryImageIdentifier
    :param recommended: Recommended machine configuration for this Image Definition;
     updatable.
    :type recommended: ~azure.mgmt.compute.v2019_03_01.models.RecommendedMachineConfiguration
    :param disallowed: Describes the disallowed disk types.
    :type disallowed: ~azure.mgmt.compute.v2019_03_01.models.Disallowed
    :param purchase_plan: Purchase plan for this Image Definition; used by marketplace
     images.
    :type purchase_plan: ~azure.mgmt.compute.v2019_03_01.models.ImagePurchasePlan
    :ivar provisioning_state: The provisioning state, which only appears in the response. Possible
     values include: "Creating", "Updating", "Failed", "Succeeded", "Deleting", "Migrating".
    :vartype provisioning_state: str or
     ~azure.mgmt.compute.v2019_03_01.models.GalleryImagePropertiesProvisioningState
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "description": {"key": "properties.description", "type": "str"},
        "eula": {"key": "properties.eula", "type": "str"},
        "privacy_statement_uri": {"key": "properties.privacyStatementUri", "type": "str"},
        "release_note_uri": {"key": "properties.releaseNoteUri", "type": "str"},
        "os_type": {"key": "properties.osType", "type": "str"},
        "os_state": {"key": "properties.osState", "type": "str"},
        "end_of_life_date": {"key": "properties.endOfLifeDate", "type": "iso-8601"},
        "identifier": {"key": "properties.identifier", "type": "GalleryImageIdentifier"},
        "recommended": {"key": "properties.recommended", "type": "RecommendedMachineConfiguration"},
        "disallowed": {"key": "properties.disallowed", "type": "Disallowed"},
        "purchase_plan": {"key": "properties.purchasePlan", "type": "ImagePurchasePlan"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        eula: Optional[str] = None,
        privacy_statement_uri: Optional[str] = None,
        release_note_uri: Optional[str] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        os_state: Optional[Union[str, "OperatingSystemStateTypes"]] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        identifier: Optional["GalleryImageIdentifier"] = None,
        recommended: Optional["RecommendedMachineConfiguration"] = None,
        disallowed: Optional["Disallowed"] = None,
        purchase_plan: Optional["ImagePurchasePlan"] = None,
        **kwargs
    ):
        # Resource handles the common ARM fields (location, tags, id/name/type).
        super(GalleryImage, self).__init__(location=location, tags=tags, **kwargs)
        # Caller-settable definition properties.
        self.description = description
        self.eula = eula
        self.privacy_statement_uri = privacy_statement_uri
        self.release_note_uri = release_note_uri
        self.os_type = os_type
        self.os_state = os_state
        self.end_of_life_date = end_of_life_date
        self.identifier = identifier
        self.recommended = recommended
        self.disallowed = disallowed
        self.purchase_plan = purchase_plan
        # Server-populated; never sent in a request.
        self.provisioning_state = None
class GalleryImageIdentifier(msrest.serialization.Model):
    """Publisher/offer/SKU triple that identifies a gallery Image Definition.

    All required parameters must be populated in order to send to Azure.

    :param publisher: Required. The name of the gallery Image Definition publisher.
    :type publisher: str
    :param offer: Required. The name of the gallery Image Definition offer.
    :type offer: str
    :param sku: Required. The name of the gallery Image Definition SKU.
    :type sku: str
    """

    _validation = {
        "publisher": {"required": True},
        "offer": {"required": True},
        "sku": {"required": True},
    }

    _attribute_map = {
        "publisher": {"key": "publisher", "type": "str"},
        "offer": {"key": "offer", "type": "str"},
        "sku": {"key": "sku", "type": "str"},
    }

    def __init__(self, *, publisher: str, offer: str, sku: str, **kwargs):
        super(GalleryImageIdentifier, self).__init__(**kwargs)
        self.sku = sku
        self.offer = offer
        self.publisher = publisher
class GalleryImageList(msrest.serialization.Model):
    """Paged response of the List Gallery Images operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of Shared Image Gallery images.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.GalleryImage]
    :param next_link: The uri to fetch the next page of Image Definitions in the Shared Image
     Gallery. Call ListNext() with this to fetch the next page of gallery Image Definitions.
    :type next_link: str
    """

    _validation = {"value": {"required": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[GalleryImage]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["GalleryImage"], next_link: Optional[str] = None, **kwargs):
        super(GalleryImageList, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class GalleryImageVersion(Resource):
    """A gallery Image Version resource (create/update payload).

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param publishing_profile: The publishing profile of a gallery Image Version.
    :type publishing_profile:
     ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersionPublishingProfile
    :ivar provisioning_state: The provisioning state, which only appears in the response. Possible
     values include: "Creating", "Updating", "Failed", "Succeeded", "Deleting", "Migrating".
    :vartype provisioning_state: str or
     ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersionPropertiesProvisioningState
    :ivar storage_profile: This is the storage profile of a Gallery Image Version.
    :vartype storage_profile:
     ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersionStorageProfile
    :ivar replication_status: This is the replication status of the gallery Image Version.
    :vartype replication_status: ~azure.mgmt.compute.v2019_03_01.models.ReplicationStatus
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
        "storage_profile": {"readonly": True},
        "replication_status": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "publishing_profile": {"key": "properties.publishingProfile", "type": "GalleryImageVersionPublishingProfile"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "storage_profile": {"key": "properties.storageProfile", "type": "GalleryImageVersionStorageProfile"},
        "replication_status": {"key": "properties.replicationStatus", "type": "ReplicationStatus"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        publishing_profile: Optional["GalleryImageVersionPublishingProfile"] = None,
        **kwargs
    ):
        # Common ARM fields are handled by Resource.
        super(GalleryImageVersion, self).__init__(location=location, tags=tags, **kwargs)
        self.publishing_profile = publishing_profile
        # The remaining attributes are server-populated (read-only).
        self.replication_status = None
        self.storage_profile = None
        self.provisioning_state = None
class GalleryImageVersionList(msrest.serialization.Model):
    """Paged response of the List Gallery Image version operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of gallery Image Versions.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersion]
    :param next_link: The uri to fetch the next page of gallery Image Versions. Call ListNext()
     with this to fetch the next page of gallery Image Versions.
    :type next_link: str
    """

    _validation = {"value": {"required": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[GalleryImageVersion]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["GalleryImageVersion"], next_link: Optional[str] = None, **kwargs):
        super(GalleryImageVersionList, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class GalleryImageVersionPublishingProfile(GalleryArtifactPublishingProfileBase):
    """Publishing profile of a gallery Image Version.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param target_regions: Target regions the Image Version is replicated to; updatable.
    :type target_regions: list[~azure.mgmt.compute.v2019_03_01.models.TargetRegion]
    :param replica_count: Number of replicas of the Image Version created per region, used
     when a region does not specify regionalReplicaCount; updatable.
    :type replica_count: int
    :param exclude_from_latest: If set to true, Virtual Machines deployed from the latest version
     of the Image Definition won't use this Image Version.
    :type exclude_from_latest: bool
    :ivar published_date: The timestamp for when the gallery Image Version is published.
    :vartype published_date: ~datetime.datetime
    :param end_of_life_date: End-of-life date of the gallery Image Version, used for
     decommissioning purposes; updatable.
    :type end_of_life_date: ~datetime.datetime
    :param storage_account_type: Storage account type used to store the image; not updatable.
     Possible values include: "Standard_LRS", "Standard_ZRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountType
    :param source: Required. The source image from which the Image Version is going to be created.
    :type source: ~azure.mgmt.compute.v2019_03_01.models.GalleryArtifactSource
    """

    _validation = {
        "published_date": {"readonly": True},
        "source": {"required": True},
    }

    _attribute_map = {
        "target_regions": {"key": "targetRegions", "type": "[TargetRegion]"},
        "replica_count": {"key": "replicaCount", "type": "int"},
        "exclude_from_latest": {"key": "excludeFromLatest", "type": "bool"},
        "published_date": {"key": "publishedDate", "type": "iso-8601"},
        "end_of_life_date": {"key": "endOfLifeDate", "type": "iso-8601"},
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
        "source": {"key": "source", "type": "GalleryArtifactSource"},
    }

    def __init__(
        self,
        *,
        source: "GalleryArtifactSource",
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        **kwargs
    ):
        # Everything except `source` is stored by the shared base profile class.
        super(GalleryImageVersionPublishingProfile, self).__init__(
            target_regions=target_regions,
            replica_count=replica_count,
            exclude_from_latest=exclude_from_latest,
            end_of_life_date=end_of_life_date,
            storage_account_type=storage_account_type,
            **kwargs
        )
        self.source = source
class GalleryImageVersionStorageProfile(msrest.serialization.Model):
    """Storage profile of a Gallery Image Version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar os_disk_image: This is the disk image base class.
    :vartype os_disk_image: ~azure.mgmt.compute.v2019_03_01.models.GalleryDiskImage
    :ivar data_disk_images: A list of data disk images.
    :vartype data_disk_images: list[~azure.mgmt.compute.v2019_03_01.models.GalleryDataDiskImage]
    """

    _validation = {
        "os_disk_image": {"readonly": True},
        "data_disk_images": {"readonly": True},
    }

    _attribute_map = {
        "os_disk_image": {"key": "osDiskImage", "type": "GalleryDiskImage"},
        "data_disk_images": {"key": "dataDiskImages", "type": "[GalleryDataDiskImage]"},
    }

    def __init__(self, **kwargs):
        super(GalleryImageVersionStorageProfile, self).__init__(**kwargs)
        # Both attributes are read-only and filled in by the service.
        self.data_disk_images = None
        self.os_disk_image = None
class GalleryList(msrest.serialization.Model):
    """Paged response of the List Galleries operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of galleries.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.Gallery]
    :param next_link: The uri to fetch the next page of galleries. Call ListNext() with this to
     fetch the next page of galleries.
    :type next_link: str
    """

    _validation = {"value": {"required": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[Gallery]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["Gallery"], next_link: Optional[str] = None, **kwargs):
        super(GalleryList, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class GalleryOSDiskImage(GalleryDiskImage):
    """The OS disk image of a gallery Image Version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar size_in_gb: This property indicates the size of the VHD to be created.
    :vartype size_in_gb: int
    :ivar host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and
     'ReadWrite'. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype host_caching: str or ~azure.mgmt.compute.v2019_03_01.models.HostCaching
    """

    _validation = {
        "size_in_gb": {"readonly": True},
        "host_caching": {"readonly": True},
    }

    _attribute_map = {
        "size_in_gb": {"key": "sizeInGB", "type": "int"},
        "host_caching": {"key": "hostCaching", "type": "str"},
    }

    def __init__(self, **kwargs):
        # No OS-specific fields; everything is inherited from GalleryDiskImage.
        super(GalleryOSDiskImage, self).__init__(**kwargs)
class GrantAccessData(msrest.serialization.Model):
    """Request body used when asking the service for a SAS.

    All required parameters must be populated in order to send to Azure.

    :param access: Required. Possible values include: "None", "Read", "Write".
    :type access: str or ~azure.mgmt.compute.v2019_03_01.models.AccessLevel
    :param duration_in_seconds: Required. Time duration in seconds until the SAS access expires.
    :type duration_in_seconds: int
    """

    _validation = {
        "access": {"required": True},
        "duration_in_seconds": {"required": True},
    }

    _attribute_map = {
        "access": {"key": "access", "type": "str"},
        "duration_in_seconds": {"key": "durationInSeconds", "type": "int"},
    }

    def __init__(self, *, access: Union[str, "AccessLevel"], duration_in_seconds: int, **kwargs):
        super(GrantAccessData, self).__init__(**kwargs)
        self.duration_in_seconds = duration_in_seconds
        self.access = access
class HardwareProfile(msrest.serialization.Model):
    """Specifies the hardware settings for the virtual machine.

    :param vm_size: Specifies the size of the virtual machine. For more information about virtual
     machine sizes, see `Sizes for virtual machines <https://docs.microsoft.com/azure/virtual-
     machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-
     machines%2fwindows%2ftoc.json>`_. The available VM sizes depend on region and availability
     set; the full set of accepted values is the
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineSizeTypes enum (e.g. "Standard_A0",
     "Standard_D2_v3", "Standard_F16s_v2", "Standard_NC6", ...). To discover available sizes use:
     `List all available virtual machine sizes in an availability set
     <https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes>`_,
     `List all available virtual machine sizes in a region
     <https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list>`_, or
     `List all available virtual machine sizes for resizing
     <https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes>`_.
    :type vm_size: str or ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineSizeTypes
    """

    _attribute_map = {"vm_size": {"key": "vmSize", "type": "str"}}

    def __init__(self, *, vm_size: Optional[Union[str, "VirtualMachineSizeTypes"]] = None, **kwargs):
        super(HardwareProfile, self).__init__(**kwargs)
        self.vm_size = vm_size
class Image(Resource):
    """The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param source_virtual_machine: The source virtual machine from which Image is created.
    :type source_virtual_machine: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param storage_profile: Specifies the storage settings for the virtual machine disks.
    :type storage_profile: ~azure.mgmt.compute.v2019_03_01.models.ImageStorageProfile
    :ivar provisioning_state: The provisioning state.
    :vartype provisioning_state: str
    :param hyper_v_generation: Gets the HyperVGenerationType of the VirtualMachine created from the
     image. Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGenerationTypes
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "source_virtual_machine": {"key": "properties.sourceVirtualMachine", "type": "SubResource"},
        "storage_profile": {"key": "properties.storageProfile", "type": "ImageStorageProfile"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        # Common ARM fields are handled by Resource.
        super(Image, self).__init__(location=location, tags=tags, **kwargs)
        self.hyper_v_generation = hyper_v_generation
        self.storage_profile = storage_profile
        self.source_virtual_machine = source_virtual_machine
        # Read-only; filled in by the service on responses.
        self.provisioning_state = None
class ImageDataDisk(msrest.serialization.Model):
    """Describes a data disk.

    All required parameters must be populated in order to send to Azure.

    :param lun: Required. Logical unit number of the data disk; identifies data disks within
     the VM and must therefore be unique for each data disk attached to a VM.
    :type lun: int
    :param snapshot: The snapshot.
    :type snapshot: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param managed_disk: The managedDisk.
    :type managed_disk: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param blob_uri: The Virtual Hard Disk.
    :type blob_uri: str
    :param caching: Caching requirements. Default: **None for Standard storage. ReadOnly for
     Premium storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param disk_size_gb: Size of empty data disks in gigabytes; can be used to overwrite the
     name of the disk in a virtual machine image. Cannot be larger than 1023 GB.
    :type disk_size_gb: int
    :param storage_account_type: Storage account type for the managed disk. NOTE: UltraSSD_LRS
     can only be used with data disks, it cannot be used with OS Disk. Possible values include:
     "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountTypes
    """

    _validation = {"lun": {"required": True}}

    _attribute_map = {
        "lun": {"key": "lun", "type": "int"},
        "snapshot": {"key": "snapshot", "type": "SubResource"},
        "managed_disk": {"key": "managedDisk", "type": "SubResource"},
        "blob_uri": {"key": "blobUri", "type": "str"},
        "caching": {"key": "caching", "type": "str"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
    }

    def __init__(
        self,
        *,
        lun: int,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        **kwargs
    ):
        super(ImageDataDisk, self).__init__(**kwargs)
        self.storage_account_type = storage_account_type
        self.disk_size_gb = disk_size_gb
        self.caching = caching
        self.blob_uri = blob_uri
        self.managed_disk = managed_disk
        self.snapshot = snapshot
        self.lun = lun
class ImageDiskReference(msrest.serialization.Model):
    """The source image used for creating the disk.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A relative uri containing either a Platform Image Repository or user image
     reference.
    :type id: str
    :param lun: If the disk is created from an image's data disk, this is an index that indicates
     which of the data disks in the image to use. For OS disks, this field is null.
    :type lun: int
    """

    _validation = {"id": {"required": True}}

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "lun": {"key": "lun", "type": "int"},
    }

    def __init__(self, *, id: str, lun: Optional[int] = None, **kwargs):
        super(ImageDiskReference, self).__init__(**kwargs)
        self.lun = lun
        self.id = id
class ImageListResult(msrest.serialization.Model):
    """Paged response of the List Image operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of Images.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.Image]
    :param next_link: The uri to fetch the next page of Images. Call ListNext() with this to fetch
     the next page of Images.
    :type next_link: str
    """

    _validation = {"value": {"required": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[Image]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["Image"], next_link: Optional[str] = None, **kwargs):
        super(ImageListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class ImageOSDisk(msrest.serialization.Model):
    """Describes an Operating System disk.

    All required parameters must be populated in order to send to Azure.

    :param os_type: Required. Type of the OS included in the disk when creating a VM from a
     custom image. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param os_state: Required. The OS State. Possible values include: "Generalized", "Specialized".
    :type os_state: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemStateTypes
    :param snapshot: The snapshot.
    :type snapshot: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param managed_disk: The managedDisk.
    :type managed_disk: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param blob_uri: The Virtual Hard Disk.
    :type blob_uri: str
    :param caching: Caching requirements. Default: **None for Standard storage. ReadOnly for
     Premium storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param disk_size_gb: Size of empty data disks in gigabytes; can be used to overwrite the
     name of the disk in a virtual machine image. Cannot be larger than 1023 GB.
    :type disk_size_gb: int
    :param storage_account_type: Storage account type for the managed disk. UltraSSD_LRS cannot
     be used with OS Disk. Possible values include: "Standard_LRS", "Premium_LRS",
     "StandardSSD_LRS", "UltraSSD_LRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountTypes
    """

    _validation = {
        "os_type": {"required": True},
        "os_state": {"required": True},
    }

    _attribute_map = {
        "os_type": {"key": "osType", "type": "str"},
        "os_state": {"key": "osState", "type": "str"},
        "snapshot": {"key": "snapshot", "type": "SubResource"},
        "managed_disk": {"key": "managedDisk", "type": "SubResource"},
        "blob_uri": {"key": "blobUri", "type": "str"},
        "caching": {"key": "caching", "type": "str"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
    }

    def __init__(
        self,
        *,
        os_type: Union[str, "OperatingSystemTypes"],
        os_state: Union[str, "OperatingSystemStateTypes"],
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        **kwargs
    ):
        super(ImageOSDisk, self).__init__(**kwargs)
        self.storage_account_type = storage_account_type
        self.disk_size_gb = disk_size_gb
        self.caching = caching
        self.blob_uri = blob_uri
        self.managed_disk = managed_disk
        self.snapshot = snapshot
        self.os_state = os_state
        self.os_type = os_type
class ImagePurchasePlan(msrest.serialization.Model):
    """Purchase plan of a marketplace-backed gallery Image Definition.

    :param name: The plan ID.
    :type name: str
    :param publisher: The publisher ID.
    :type publisher: str
    :param product: The product ID.
    :type product: str
    """

    # Attribute-name -> wire-format key/type mapping consumed by msrest.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, publisher: Optional[str] = None,
                 product: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.product = product
        self.publisher = publisher
        self.name = name
class SubResource(msrest.serialization.Model):
    """A reference to another Azure resource by its ARM id.

    :param id: Resource Id.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        # NOTE: the parameter intentionally shadows the builtin ``id`` so the
        # keyword matches the JSON wire-format key.
        self.id = id
class ImageReference(SubResource):
    """Identifies the platform, marketplace, or user image a VM is created from.

    Required when creating from a platform, marketplace, or VM image; not used
    in other creation operations. NOTE: publisher and offer can only be set
    when the scale set is created.

    :param id: Resource Id.
    :type id: str
    :param publisher: The image publisher.
    :type publisher: str
    :param offer: The offer of the platform or marketplace image used to
     create the virtual machine.
    :type offer: str
    :param sku: The image SKU.
    :type sku: str
    :param version: Image version in Major.Minor.Build form (decimal numbers),
     or 'latest' to use the newest version available at deploy time. Even with
     'latest' the VM image will not automatically update after deploy time.
    :type version: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, publisher: Optional[str] = None,
                 offer: Optional[str] = None, sku: Optional[str] = None,
                 version: Optional[str] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        self.version = version
        self.sku = sku
        self.offer = offer
        self.publisher = publisher
class ImageStorageProfile(msrest.serialization.Model):
    """Storage profile of an image: OS disk, data disks, and zone resiliency.

    :param os_disk: The operating system disk used by the virtual machine.
    :type os_disk: ~azure.mgmt.compute.v2019_03_01.models.ImageOSDisk
    :param data_disks: Parameters used to add data disks to the virtual machine.
    :type data_disks: list[~azure.mgmt.compute.v2019_03_01.models.ImageDataDisk]
    :param zone_resilient: Whether the image is zone resilient; default is
     false. Zone-resilient images can be created only in regions that provide
     Zone Redundant Storage (ZRS).
    :type zone_resilient: bool
    """

    _attribute_map = {
        'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
        'zone_resilient': {'key': 'zoneResilient', 'type': 'bool'},
    }

    def __init__(self, *, os_disk: Optional["ImageOSDisk"] = None,
                 data_disks: Optional[List["ImageDataDisk"]] = None,
                 zone_resilient: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        self.zone_resilient = zone_resilient
        self.data_disks = data_disks
        self.os_disk = os_disk
class ImageUpdate(UpdateResource):
    """Update payload for an Image resource; only tags may be updated.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param source_virtual_machine: The source virtual machine from which the
     Image is created.
    :type source_virtual_machine: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param storage_profile: Storage settings for the virtual machine disks.
    :type storage_profile: ~azure.mgmt.compute.v2019_03_01.models.ImageStorageProfile
    :ivar provisioning_state: The provisioning state.
    :vartype provisioning_state: str
    :param hyper_v_generation: HyperVGenerationType of the VirtualMachine
     created from the image. Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGenerationTypes
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 source_virtual_machine: Optional["SubResource"] = None,
                 storage_profile: Optional["ImageStorageProfile"] = None,
                 hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
                 **kwargs):
        super().__init__(tags=tags, **kwargs)
        self.hyper_v_generation = hyper_v_generation
        self.storage_profile = storage_profile
        self.source_virtual_machine = source_virtual_machine
        # Read-only; populated by the service, never sent in requests.
        self.provisioning_state = None
class InnerError(msrest.serialization.Model):
    """Inner error details.

    :param exceptiontype: The exception type.
    :type exceptiontype: str
    :param errordetail: The internal error message or exception dump.
    :type errordetail: str
    """

    _attribute_map = {
        'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
        'errordetail': {'key': 'errordetail', 'type': 'str'},
    }

    def __init__(self, *, exceptiontype: Optional[str] = None,
                 errordetail: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.errordetail = errordetail
        self.exceptiontype = exceptiontype
class InstanceViewStatus(msrest.serialization.Model):
    """A single status entry of a resource instance view.

    :param code: The status code.
    :type code: str
    :param level: The level code. Possible values include: "Info", "Warning",
     "Error".
    :type level: str or ~azure.mgmt.compute.v2019_03_01.models.StatusLevelTypes
    :param display_status: The short localizable label for the status.
    :type display_status: str
    :param message: The detailed status message, including for alerts and
     error messages.
    :type message: str
    :param time: The time of the status.
    :type time: ~datetime.datetime
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'level': {'key': 'level', 'type': 'str'},
        'display_status': {'key': 'displayStatus', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
    }

    def __init__(self, *, code: Optional[str] = None,
                 level: Optional[Union[str, "StatusLevelTypes"]] = None,
                 display_status: Optional[str] = None,
                 message: Optional[str] = None,
                 time: Optional[datetime.datetime] = None, **kwargs):
        super().__init__(**kwargs)
        self.time = time
        self.message = message
        self.display_status = display_status
        self.level = level
        self.code = code
class KeyVaultAndKeyReference(msrest.serialization.Model):
    """Key Vault key URL plus the vault id of the key-encryption key (KeK).

    The KeK is optional and, when provided, is used to unwrap the encryptionKey.
    All required parameters must be populated in order to send to Azure.

    :param source_vault: Required. Resource id of the KeyVault containing the
     key or secret.
    :type source_vault: ~azure.mgmt.compute.v2019_03_01.models.SourceVault
    :param key_url: Required. Url pointing to a key or secret in KeyVault.
    :type key_url: str
    """

    _validation = {
        'source_vault': {'required': True},
        'key_url': {'required': True},
    }

    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
        'key_url': {'key': 'keyUrl', 'type': 'str'},
    }

    def __init__(self, *, source_vault: "SourceVault", key_url: str, **kwargs):
        super().__init__(**kwargs)
        self.key_url = key_url
        self.source_vault = source_vault
class KeyVaultAndSecretReference(msrest.serialization.Model):
    """Key Vault secret URL plus the vault id of the encryption key.

    All required parameters must be populated in order to send to Azure.

    :param source_vault: Required. Resource id of the KeyVault containing the
     key or secret.
    :type source_vault: ~azure.mgmt.compute.v2019_03_01.models.SourceVault
    :param secret_url: Required. Url pointing to a key or secret in KeyVault.
    :type secret_url: str
    """

    _validation = {
        'source_vault': {'required': True},
        'secret_url': {'required': True},
    }

    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
    }

    def __init__(self, *, source_vault: "SourceVault", secret_url: str, **kwargs):
        super().__init__(**kwargs)
        self.secret_url = secret_url
        self.source_vault = source_vault
class KeyVaultKeyReference(msrest.serialization.Model):
    """Reference to a key-encryption key stored in Key Vault.

    All required parameters must be populated in order to send to Azure.

    :param key_url: Required. The URL referencing a key encryption key in Key
     Vault.
    :type key_url: str
    :param source_vault: Required. The relative URL of the Key Vault containing
     the key.
    :type source_vault: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    """

    _validation = {
        'key_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'key_url': {'key': 'keyUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(self, *, key_url: str, source_vault: "SubResource", **kwargs):
        super().__init__(**kwargs)
        self.source_vault = source_vault
        self.key_url = key_url
class KeyVaultSecretReference(msrest.serialization.Model):
    """Reference to a secret stored in Key Vault.

    All required parameters must be populated in order to send to Azure.

    :param secret_url: Required. The URL referencing a secret in a Key Vault.
    :type secret_url: str
    :param source_vault: Required. The relative URL of the Key Vault containing
     the secret.
    :type source_vault: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    """

    _validation = {
        'secret_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(self, *, secret_url: str, source_vault: "SubResource", **kwargs):
        super().__init__(**kwargs)
        self.source_vault = source_vault
        self.secret_url = secret_url
class LinuxConfiguration(msrest.serialization.Model):
    """Linux operating system settings on the virtual machine.

    For supported distributions, see `Linux on Azure-Endorsed Distributions
    <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_;
    for non-endorsed distributions, see `Information for Non-Endorsed Distributions
    <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.

    :param disable_password_authentication: Whether password authentication
     should be disabled.
    :type disable_password_authentication: bool
    :param ssh: The ssh key configuration for a Linux OS.
    :type ssh: ~azure.mgmt.compute.v2019_03_01.models.SshConfiguration
    :param provision_vm_agent: Whether the virtual machine agent should be
     provisioned on the VM. When omitted from the request body the default
     behavior is true, ensuring the VM Agent is installed so extensions can be
     added later.
    :type provision_vm_agent: bool
    """

    _attribute_map = {
        'disable_password_authentication': {'key': 'disablePasswordAuthentication', 'type': 'bool'},
        'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
    }

    def __init__(self, *, disable_password_authentication: Optional[bool] = None,
                 ssh: Optional["SshConfiguration"] = None,
                 provision_vm_agent: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        self.provision_vm_agent = provision_vm_agent
        self.ssh = ssh
        self.disable_password_authentication = disable_password_authentication
class ListUsagesResult(msrest.serialization.Model):
    """Response of the List Usages operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of compute resource usages.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.Usage]
    :param next_link: URI of the next page of compute resource usage
     information; call ListNext() with it to fetch that page.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["Usage"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class LogAnalyticsInputBase(msrest.serialization.Model):
    """Base input for the LogAnalytics APIs.

    All required parameters must be populated in order to send to Azure.

    :param blob_container_sas_uri: Required. SAS Uri of the logging blob
     container to which LogAnalytics Api writes output logs to.
    :type blob_container_sas_uri: str
    :param from_time: Required. From time of the query.
    :type from_time: ~datetime.datetime
    :param to_time: Required. To time of the query.
    :type to_time: ~datetime.datetime
    :param group_by_throttle_policy: Group query result by Throttle Policy
     applied.
    :type group_by_throttle_policy: bool
    :param group_by_operation_name: Group query result by Operation Name.
    :type group_by_operation_name: bool
    :param group_by_resource_name: Group query result by Resource Name.
    :type group_by_resource_name: bool
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
    }

    def __init__(self, *, blob_container_sas_uri: str,
                 from_time: datetime.datetime, to_time: datetime.datetime,
                 group_by_throttle_policy: Optional[bool] = None,
                 group_by_operation_name: Optional[bool] = None,
                 group_by_resource_name: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        # Required output destination and query time window.
        self.blob_container_sas_uri = blob_container_sas_uri
        self.from_time = from_time
        self.to_time = to_time
        # Optional result-grouping switches.
        self.group_by_resource_name = group_by_resource_name
        self.group_by_operation_name = group_by_operation_name
        self.group_by_throttle_policy = group_by_throttle_policy
class LogAnalyticsOperationResult(msrest.serialization.Model):
    """Status response of a LogAnalytics operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar properties: LogAnalyticsOutput.
    :vartype properties: ~azure.mgmt.compute.v2019_03_01.models.LogAnalyticsOutput
    """

    _validation = {
        'properties': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'LogAnalyticsOutput'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service on deserialization.
        self.properties = None
class LogAnalyticsOutput(msrest.serialization.Model):
    """Output properties of a LogAnalytics operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar output: Output file Uri path to blob container.
    :vartype output: str
    """

    _validation = {
        'output': {'readonly': True},
    }

    _attribute_map = {
        'output': {'key': 'output', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service on deserialization.
        self.output = None
class MaintenanceRedeployStatus(msrest.serialization.Model):
    """Status of a maintenance operation on a virtual machine.

    :param is_customer_initiated_maintenance_allowed: True, if customer is
     allowed to perform Maintenance.
    :type is_customer_initiated_maintenance_allowed: bool
    :param pre_maintenance_window_start_time: Start Time for the Pre
     Maintenance Window.
    :type pre_maintenance_window_start_time: ~datetime.datetime
    :param pre_maintenance_window_end_time: End Time for the Pre Maintenance
     Window.
    :type pre_maintenance_window_end_time: ~datetime.datetime
    :param maintenance_window_start_time: Start Time for the Maintenance
     Window.
    :type maintenance_window_start_time: ~datetime.datetime
    :param maintenance_window_end_time: End Time for the Maintenance Window.
    :type maintenance_window_end_time: ~datetime.datetime
    :param last_operation_result_code: The Last Maintenance Operation Result
     Code. Possible values include: "None", "RetryLater", "MaintenanceAborted",
     "MaintenanceCompleted".
    :type last_operation_result_code: str or
     ~azure.mgmt.compute.v2019_03_01.models.MaintenanceOperationResultCodeTypes
    :param last_operation_message: Message returned for the last Maintenance
     Operation.
    :type last_operation_message: str
    """

    _attribute_map = {
        'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
        'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
        'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
        'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
        'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
        'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'str'},
        'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
    }

    def __init__(self, *,
                 is_customer_initiated_maintenance_allowed: Optional[bool] = None,
                 pre_maintenance_window_start_time: Optional[datetime.datetime] = None,
                 pre_maintenance_window_end_time: Optional[datetime.datetime] = None,
                 maintenance_window_start_time: Optional[datetime.datetime] = None,
                 maintenance_window_end_time: Optional[datetime.datetime] = None,
                 last_operation_result_code: Optional[Union[str, "MaintenanceOperationResultCodeTypes"]] = None,
                 last_operation_message: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
        # The two maintenance windows (pre-window and actual window).
        self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
        self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
        self.maintenance_window_start_time = maintenance_window_start_time
        self.maintenance_window_end_time = maintenance_window_end_time
        # Outcome of the most recent maintenance operation.
        self.last_operation_result_code = last_operation_result_code
        self.last_operation_message = last_operation_message
class ManagedArtifact(msrest.serialization.Model):
    """The managed artifact.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The managed artifact id.
    :type id: str
    """

    _validation = {
        'id': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, id: str, **kwargs):
        super().__init__(**kwargs)
        # NOTE: parameter intentionally shadows the builtin ``id`` to match the
        # wire-format key.
        self.id = id
class ManagedDiskParameters(SubResource):
    """Parameters of a managed disk.

    :param id: Resource Id.
    :type id: str
    :param storage_account_type: Storage account type for the managed disk.
     NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used
     with OS Disk. Possible values include: "Standard_LRS", "Premium_LRS",
     "StandardSSD_LRS", "UltraSSD_LRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountTypes
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None,
                 storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
                 **kwargs):
        super().__init__(id=id, **kwargs)
        self.storage_account_type = storage_account_type
class NetworkInterfaceReference(SubResource):
    """Reference to a network interface attached to a virtual machine.

    :param id: Resource Id.
    :type id: str
    :param primary: Whether this is the primary network interface when the
     virtual machine has more than 1 network interface.
    :type primary: bool
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
    }

    def __init__(self, *, id: Optional[str] = None, primary: Optional[bool] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        self.primary = primary
class NetworkProfile(msrest.serialization.Model):
    """Network interfaces of the virtual machine.

    :param network_interfaces: Resource Ids of the network interfaces
     associated with the virtual machine.
    :type network_interfaces:
     list[~azure.mgmt.compute.v2019_03_01.models.NetworkInterfaceReference]
    """

    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterfaceReference]'},
    }

    def __init__(self, *,
                 network_interfaces: Optional[List["NetworkInterfaceReference"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.network_interfaces = network_interfaces
class OSDisk(msrest.serialization.Model):
    """The operating system disk used by the virtual machine.

    For more information about disks, see `About disks and VHDs for Azure
    virtual machines
    <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.

    All required parameters must be populated in order to send to Azure.

    :param os_type: The type of the OS included in the disk when creating a VM
     from a user-image or a specialized VHD. Possible values include:
     "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param encryption_settings: Encryption settings for the OS Disk. Minimum
     api-version: 2015-06-15.
    :type encryption_settings: ~azure.mgmt.compute.v2019_03_01.models.DiskEncryptionSettings
    :param name: The disk name.
    :type name: str
    :param vhd: The virtual hard disk.
    :type vhd: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param image: The source user image virtual hard disk. It will be copied
     before being attached to the virtual machine; if SourceImage is provided,
     the destination virtual hard drive must not exist.
    :type image: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param caching: Caching requirements. Possible values are: **None**,
     **ReadOnly**, **ReadWrite**. Default: **None for Standard storage.
     ReadOnly for Premium storage**. Possible values include: "None",
     "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param write_accelerator_enabled: Whether writeAccelerator should be
     enabled or disabled on the disk.
    :type write_accelerator_enabled: bool
    :param diff_disk_settings: Ephemeral Disk Settings for the operating
     system disk used by the virtual machine.
    :type diff_disk_settings: ~azure.mgmt.compute.v2019_03_01.models.DiffDiskSettings
    :param create_option: Required. How the virtual machine should be created.
     **Attach** \u2013 use a specialized disk to create the virtual machine.
     **FromImage** \u2013 use an image to create the virtual machine; with a
     platform image also supply the imageReference element, with a marketplace
     image also supply the plan element. Possible values include: "FromImage",
     "Empty", "Attach".
    :type create_option: str or ~azure.mgmt.compute.v2019_03_01.models.DiskCreateOptionTypes
    :param disk_size_gb: Size of an empty data disk in gigabytes; can be used
     to overwrite the size of the disk in a virtual machine image. This value
     cannot be larger than 1023 GB.
    :type disk_size_gb: int
    :param managed_disk: The managed disk parameters.
    :type managed_disk: ~azure.mgmt.compute.v2019_03_01.models.ManagedDiskParameters
    """

    _validation = {
        'create_option': {'required': True},
    }

    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': 'DiskEncryptionSettings'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
    }

    def __init__(self, *, create_option: Union[str, "DiskCreateOptionTypes"],
                 os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
                 encryption_settings: Optional["DiskEncryptionSettings"] = None,
                 name: Optional[str] = None,
                 vhd: Optional["VirtualHardDisk"] = None,
                 image: Optional["VirtualHardDisk"] = None,
                 caching: Optional[Union[str, "CachingTypes"]] = None,
                 write_accelerator_enabled: Optional[bool] = None,
                 diff_disk_settings: Optional["DiffDiskSettings"] = None,
                 disk_size_gb: Optional[int] = None,
                 managed_disk: Optional["ManagedDiskParameters"] = None, **kwargs):
        super().__init__(**kwargs)
        # Required: how the disk is created (FromImage / Empty / Attach).
        self.create_option = create_option
        self.os_type = os_type
        self.encryption_settings = encryption_settings
        self.name = name
        # Disk content sources.
        self.vhd = vhd
        self.image = image
        # Optional behavior/sizing settings.
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.diff_disk_settings = diff_disk_settings
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
class OSDiskImage(msrest.serialization.Model):
    """Os disk image information.

    All required parameters must be populated in order to send to Azure.

    :param operating_system: Required. The operating system of the
     osDiskImage. Possible values include: "Windows", "Linux".
    :type operating_system: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    """

    _validation = {
        'operating_system': {'required': True},
    }

    _attribute_map = {
        'operating_system': {'key': 'operatingSystem', 'type': 'str'},
    }

    def __init__(self, *, operating_system: Union[str, "OperatingSystemTypes"], **kwargs):
        super().__init__(**kwargs)
        self.operating_system = operating_system
class OSProfile(msrest.serialization.Model):
"""Specifies the operating system settings for the virtual machine.
:param computer_name: Specifies the host OS name of the virtual machine.
:code:`<br>`:code:`<br>` This name cannot be updated after the VM is created.
:code:`<br>`:code:`<br>` **Max-length (Windows):** 15 characters :code:`<br>`:code:`<br>`
**Max-length (Linux):** 64 characters. :code:`<br>`:code:`<br>` For naming conventions and
restrictions see `Azure infrastructure services implementation guidelines
<https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-
subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-
conventions>`_.
:type computer_name: str
:param admin_username: Specifies the name of the administrator account.
:code:`<br>`:code:`<br>` **Windows-only restriction:** Cannot end in "."
:code:`<br>`:code:`<br>` **Disallowed values:** "administrator", "admin", "user", "user1",
"test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2",
"aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql",
"support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".
:code:`<br>`:code:`<br>` **Minimum-length (Linux):** 1 character :code:`<br>`:code:`<br>`
**Max-length (Linux):** 64 characters :code:`<br>`:code:`<br>` **Max-length (Windows):** 20
characters :code:`<br>`:code:`<br>`:code:`<li>` For root access to the Linux VM, see `Using
root privileges on Linux virtual machines in Azure <https://docs.microsoft.com/azure/virtual-
machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-
machines%2flinux%2ftoc.json>`_\ :code:`<br>`:code:`<li>` For a list of built-in system users on
Linux that should not be used in this field, see `Selecting User Names for Linux on Azure
<https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-
usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
:type admin_username: str
:param admin_password: Specifies the password of the administrator account.
:code:`<br>`:code:`<br>` **Minimum-length (Windows):** 8 characters :code:`<br>`:code:`<br>`
**Minimum-length (Linux):** 6 characters :code:`<br>`:code:`<br>` **Max-length (Windows):** 123
characters :code:`<br>`:code:`<br>` **Max-length (Linux):** 72 characters
:code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4 conditions below need to be
fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper characters :code:`<br>` Has a
digit :code:`<br>` Has a special character (Regex match [\W_]) :code:`<br>`:code:`<br>`
**Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word",
"pass@word1", "Password!", "Password1", "Password22", "iloveyou!" :code:`<br>`:code:`<br>` For
resetting the password, see `How to reset the Remote Desktop service or its login password in a
Windows VM <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-
rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_ :code:`<br>`:code:`<br>` For
resetting root password, see `Manage users, SSH, and check or repair disks on Azure Linux VMs
using the VMAccess Extension <https://docs.microsoft.com/azure/virtual-machines/virtual-
machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-
machines%2flinux%2ftoc.json#reset-root-password>`_.
:type admin_password: str
:param custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded
string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum
length of the binary array is 65535 bytes. :code:`<br>`:code:`<br>` For using cloud-init for
your VM, see `Using cloud-init to customize a Linux VM during creation
<https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-
init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
:type custom_data: str
:param windows_configuration: Specifies Windows operating system settings on the virtual
machine.
:type windows_configuration: ~azure.mgmt.compute.v2019_03_01.models.WindowsConfiguration
:param linux_configuration: Specifies the Linux operating system settings on the virtual
machine. :code:`<br>`:code:`<br>`For a list of supported Linux distributions, see `Linux on
Azure-Endorsed Distributions <https://docs.microsoft.com/azure/virtual-machines/virtual-
machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_
:code:`<br>`:code:`<br>` For running non-endorsed distributions, see `Information for Non-
Endorsed Distributions <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-
linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
:type linux_configuration: ~azure.mgmt.compute.v2019_03_01.models.LinuxConfiguration
:param secrets: Specifies set of certificates that should be installed onto the virtual
machine.
:type secrets: list[~azure.mgmt.compute.v2019_03_01.models.VaultSecretGroup]
:param allow_extension_operations: Specifies whether extension operations should be allowed on
the virtual machine. :code:`<br>`:code:`<br>`This may only be set to False when no extensions
are present on the virtual machine.
:type allow_extension_operations: bool
:param require_guest_provision_signal: Specifies whether the guest provision signal is required
from the virtual machine.
:type require_guest_provision_signal: bool
"""
_attribute_map = {
'computer_name': {'key': 'computerName', 'type': 'str'},
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
'custom_data': {'key': 'customData', 'type': 'str'},
'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
'allow_extension_operations': {'key': 'allowExtensionOperations', 'type': 'bool'},
'require_guest_provision_signal': {'key': 'requireGuestProvisionSignal', 'type': 'bool'},
}
    def __init__(
        self,
        *,
        computer_name: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        require_guest_provision_signal: Optional[bool] = None,
        **kwargs
    ):
        # Keyword-only constructor: each OS-profile setting is copied verbatim
        # onto the instance; msrest serializes these attributes according to
        # the class-level _attribute_map.
        super(OSProfile, self).__init__(**kwargs)
        self.computer_name = computer_name
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
        self.allow_extension_operations = allow_extension_operations
        self.require_guest_provision_signal = require_guest_provision_signal
class Plan(msrest.serialization.Model):
    """Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.

    :param name: The plan ID.
    :type name: str
    :param publisher: The publisher ID.
    :type publisher: str
    :param product: Specifies the product of the image from the marketplace. This is the same value
     as Offer under the imageReference element.
    :type product: str
    :param promotion_code: The promotion code.
    :type promotion_code: str
    """

    # Attribute name -> (wire key, msrest type) mapping used for
    # (de)serialization; order and keys must match the service contract.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
        'promotion_code': {'key': 'promotionCode', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, publisher: Optional[str] = None,
                 product: Optional[str] = None, promotion_code: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # All plan fields are optional; unset values are omitted on the wire.
        self.name = name
        self.publisher = publisher
        self.product = product
        self.promotion_code = promotion_code
class ProximityPlacementGroup(Resource):
    """Specifies information about the proximity placement group.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param proximity_placement_group_type: Specifies the type of the proximity placement group.
     :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **Standard** : Co-locate
     resources within an Azure region or Availability Zone. :code:`<br>`:code:`<br>` **Ultra** : For
     future use. Possible values include: "Standard", "Ultra".
    :type proximity_placement_group_type: str or
     ~azure.mgmt.compute.v2019_03_01.models.ProximityPlacementGroupType
    :ivar virtual_machines: A list of references to all virtual machines in the proximity placement
     group.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :ivar virtual_machine_scale_sets: A list of references to all virtual machine scale sets in the
     proximity placement group.
    :vartype virtual_machine_scale_sets: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :ivar availability_sets: A list of references to all availability sets in the proximity
     placement group.
    :vartype availability_sets: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    """

    # 'readonly' entries are server-populated and rejected on requests;
    # 'required' entries must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'virtual_machines': {'readonly': True},
        'virtual_machine_scale_sets': {'readonly': True},
        'availability_sets': {'readonly': True},
    }

    # Attribute name -> (wire key, msrest type); 'properties.*' keys are
    # nested under the resource's "properties" envelope on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'proximity_placement_group_type': {'key': 'properties.proximityPlacementGroupType', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'virtual_machine_scale_sets': {'key': 'properties.virtualMachineScaleSets', 'type': '[SubResource]'},
        'availability_sets': {'key': 'properties.availabilitySets', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        proximity_placement_group_type: Optional[Union[str, "ProximityPlacementGroupType"]] = None,
        **kwargs
    ):
        # location/tags are handled by the Resource base class.
        super(ProximityPlacementGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.proximity_placement_group_type = proximity_placement_group_type
        # Read-only collections; filled in by the service on deserialization.
        self.virtual_machines = None
        self.virtual_machine_scale_sets = None
        self.availability_sets = None
class ProximityPlacementGroupListResult(msrest.serialization.Model):
    """The List Proximity Placement Group operation response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of proximity placement groups.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.ProximityPlacementGroup]
    :param next_link: The URI to fetch the next page of proximity placement groups.
    :type next_link: str
    """

    # 'value' is mandatory; 'next_link' is absent on the final page.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ProximityPlacementGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["ProximityPlacementGroup"],
                 next_link: Optional[str] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ProximityPlacementGroupUpdate(UpdateResource):
    """Specifies information about the proximity placement group.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs) -> None:
        # Only tags are updatable; everything else lives on the base class.
        super().__init__(tags=tags, **kwargs)
class PurchasePlan(msrest.serialization.Model):
    """Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.

    All required parameters must be populated in order to send to Azure.

    :param publisher: Required. The publisher ID.
    :type publisher: str
    :param name: Required. The plan ID.
    :type name: str
    :param product: Required. Specifies the product of the image from the marketplace. This is the
     same value as Offer under the imageReference element.
    :type product: str
    """

    # Every field of a purchase plan is mandatory.
    _validation = {
        'publisher': {'required': True},
        'name': {'required': True},
        'product': {'required': True},
    }

    _attribute_map = {
        'publisher': {'key': 'publisher', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
    }

    def __init__(self, *, publisher: str, name: str, product: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.publisher = publisher
        self.name = name
        self.product = product
class RecommendedMachineConfiguration(msrest.serialization.Model):
    """The properties describe the recommended machine configuration for this Image Definition. These properties are updatable.

    :param v_cp_us: Describes the resource range.
    :type v_cp_us: ~azure.mgmt.compute.v2019_03_01.models.ResourceRange
    :param memory: Describes the resource range.
    :type memory: ~azure.mgmt.compute.v2019_03_01.models.ResourceRange
    """

    _attribute_map = {
        'v_cp_us': {'key': 'vCPUs', 'type': 'ResourceRange'},
        'memory': {'key': 'memory', 'type': 'ResourceRange'},
    }

    def __init__(self, *, v_cp_us: Optional["ResourceRange"] = None,
                 memory: Optional["ResourceRange"] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Both ranges are optional recommendations, not hard limits.
        self.v_cp_us = v_cp_us
        self.memory = memory
class RecoveryWalkResponse(msrest.serialization.Model):
    """Response after calling a manual recovery walk.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar walk_performed: Whether the recovery walk was performed.
    :vartype walk_performed: bool
    :ivar next_platform_update_domain: The next update domain that needs to be walked. Null means
     walk spanning all update domains has been completed.
    :vartype next_platform_update_domain: int
    """

    # Both fields are server-populated; client-supplied values are ignored.
    _validation = {
        'walk_performed': {'readonly': True},
        'next_platform_update_domain': {'readonly': True},
    }

    _attribute_map = {
        'walk_performed': {'key': 'walkPerformed', 'type': 'bool'},
        'next_platform_update_domain': {'key': 'nextPlatformUpdateDomain', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # No settable attributes; values arrive via deserialization only.
        super(RecoveryWalkResponse, self).__init__(**kwargs)
        self.walk_performed = None
        self.next_platform_update_domain = None
class RegionalReplicationStatus(msrest.serialization.Model):
    """This is the regional replication status.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar region: The region to which the gallery Image Version is being replicated to.
    :vartype region: str
    :ivar state: This is the regional replication state. Possible values include: "Unknown",
     "Replicating", "Completed", "Failed".
    :vartype state: str or ~azure.mgmt.compute.v2019_03_01.models.ReplicationState
    :ivar details: The details of the replication status.
    :vartype details: str
    :ivar progress: It indicates progress of the replication job.
    :vartype progress: int
    """

    # Entirely server-populated; requests never carry these fields.
    _validation = {
        'region': {'readonly': True},
        'state': {'readonly': True},
        'details': {'readonly': True},
        'progress': {'readonly': True},
    }

    _attribute_map = {
        'region': {'key': 'region', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'details': {'key': 'details', 'type': 'str'},
        'progress': {'key': 'progress', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RegionalReplicationStatus, self).__init__(**kwargs)
        self.region = None
        self.state = None
        self.details = None
        self.progress = None
class ReplicationStatus(msrest.serialization.Model):
    """This is the replication status of the gallery Image Version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar aggregated_state: This is the aggregated replication status based on all the regional
     replication status flags. Possible values include: "Unknown", "InProgress", "Completed",
     "Failed".
    :vartype aggregated_state: str or
     ~azure.mgmt.compute.v2019_03_01.models.AggregatedReplicationState
    :ivar summary: This is a summary of replication status for each region.
    :vartype summary: list[~azure.mgmt.compute.v2019_03_01.models.RegionalReplicationStatus]
    """

    # Server-populated status container; nothing is caller-settable.
    _validation = {
        'aggregated_state': {'readonly': True},
        'summary': {'readonly': True},
    }

    _attribute_map = {
        'aggregated_state': {'key': 'aggregatedState', 'type': 'str'},
        'summary': {'key': 'summary', 'type': '[RegionalReplicationStatus]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ReplicationStatus, self).__init__(**kwargs)
        self.aggregated_state = None
        self.summary = None
class RequestRateByIntervalInput(LogAnalyticsInputBase):
    """Api request input for LogAnalytics getRequestRateByInterval Api.

    All required parameters must be populated in order to send to Azure.

    :param blob_container_sas_uri: Required. SAS Uri of the logging blob container to which
     LogAnalytics Api writes output logs to.
    :type blob_container_sas_uri: str
    :param from_time: Required. From time of the query.
    :type from_time: ~datetime.datetime
    :param to_time: Required. To time of the query.
    :type to_time: ~datetime.datetime
    :param group_by_throttle_policy: Group query result by Throttle Policy applied.
    :type group_by_throttle_policy: bool
    :param group_by_operation_name: Group query result by Operation Name.
    :type group_by_operation_name: bool
    :param group_by_resource_name: Group query result by Resource Name.
    :type group_by_resource_name: bool
    :param interval_length: Required. Interval value in minutes used to create LogAnalytics call
     rate logs. Possible values include: "ThreeMins", "FiveMins", "ThirtyMins", "SixtyMins".
    :type interval_length: str or ~azure.mgmt.compute.v2019_03_01.models.IntervalInMins
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
        'interval_length': {'required': True},
    }

    # datetimes serialize as ISO-8601 strings on the wire.
    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'interval_length': {'key': 'intervalLength', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        interval_length: Union[str, "IntervalInMins"],
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        **kwargs
    ):
        # All shared query fields are delegated to LogAnalyticsInputBase; only
        # interval_length is specific to this request type.
        super(RequestRateByIntervalInput, self).__init__(blob_container_sas_uri=blob_container_sas_uri, from_time=from_time, to_time=to_time, group_by_throttle_policy=group_by_throttle_policy, group_by_operation_name=group_by_operation_name, group_by_resource_name=group_by_resource_name, **kwargs)
        self.interval_length = interval_length
class ResourceRange(msrest.serialization.Model):
    """Describes the resource range.

    :param min: The minimum number of the resource.
    :type min: int
    :param max: The maximum number of the resource.
    :type max: int
    """

    _attribute_map = {
        'min': {'key': 'min', 'type': 'int'},
        'max': {'key': 'max', 'type': 'int'},
    }

    # NOTE: parameter names 'min'/'max' shadow builtins, but they are part of
    # the generated public interface and must not be renamed.
    def __init__(self, *, min: Optional[int] = None, max: Optional[int] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.min = min
        self.max = max
class RollbackStatusInfo(msrest.serialization.Model):
    """Information about rollback on failed VM instances after a OS Upgrade operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar successfully_rolledback_instance_count: The number of instances which have been
     successfully rolled back.
    :vartype successfully_rolledback_instance_count: int
    :ivar failed_rolledback_instance_count: The number of instances which failed to rollback.
    :vartype failed_rolledback_instance_count: int
    :ivar rollback_error: Error details if OS rollback failed.
    :vartype rollback_error: ~azure.mgmt.compute.v2019_03_01.models.ApiError
    """

    # All fields are read-only status reported by the service.
    _validation = {
        'successfully_rolledback_instance_count': {'readonly': True},
        'failed_rolledback_instance_count': {'readonly': True},
        'rollback_error': {'readonly': True},
    }

    _attribute_map = {
        'successfully_rolledback_instance_count': {'key': 'successfullyRolledbackInstanceCount', 'type': 'int'},
        'failed_rolledback_instance_count': {'key': 'failedRolledbackInstanceCount', 'type': 'int'},
        'rollback_error': {'key': 'rollbackError', 'type': 'ApiError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollbackStatusInfo, self).__init__(**kwargs)
        self.successfully_rolledback_instance_count = None
        self.failed_rolledback_instance_count = None
        self.rollback_error = None
class RollingUpgradePolicy(msrest.serialization.Model):
    """The configuration parameters used while performing a rolling upgrade.

    :param max_batch_instance_percent: The maximum percent of total virtual machine instances that
     will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum,
     unhealthy instances in previous or future batches can cause the percentage of instances in a
     batch to decrease to ensure higher reliability. The default value for this parameter is 20%.
    :type max_batch_instance_percent: int
    :param max_unhealthy_instance_percent: The maximum percentage of the total virtual machine
     instances in the scale set that can be simultaneously unhealthy, either as a result of being
     upgraded, or by being found in an unhealthy state by the virtual machine health checks before
     the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The
     default value for this parameter is 20%.
    :type max_unhealthy_instance_percent: int
    :param max_unhealthy_upgraded_instance_percent: The maximum percentage of upgraded virtual
     machine instances that can be found to be in an unhealthy state. This check will happen after
     each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The
     default value for this parameter is 20%.
    :type max_unhealthy_upgraded_instance_percent: int
    :param pause_time_between_batches: The wait time between completing the update for all virtual
     machines in one batch and starting the next batch. The time duration should be specified in ISO
     8601 format. The default value is 0 seconds (PT0S).
    :type pause_time_between_batches: str
    """

    # Percentage bounds enforced client-side before the request is sent.
    _validation = {
        'max_batch_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_upgraded_instance_percent': {'maximum': 100, 'minimum': 0},
    }

    _attribute_map = {
        'max_batch_instance_percent': {'key': 'maxBatchInstancePercent', 'type': 'int'},
        'max_unhealthy_instance_percent': {'key': 'maxUnhealthyInstancePercent', 'type': 'int'},
        'max_unhealthy_upgraded_instance_percent': {'key': 'maxUnhealthyUpgradedInstancePercent', 'type': 'int'},
        'pause_time_between_batches': {'key': 'pauseTimeBetweenBatches', 'type': 'str'},
    }

    def __init__(self, *,
                 max_batch_instance_percent: Optional[int] = None,
                 max_unhealthy_instance_percent: Optional[int] = None,
                 max_unhealthy_upgraded_instance_percent: Optional[int] = None,
                 pause_time_between_batches: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # None means "use the service-side default" (20% / PT0S, see docstring).
        self.max_batch_instance_percent = max_batch_instance_percent
        self.max_unhealthy_instance_percent = max_unhealthy_instance_percent
        self.max_unhealthy_upgraded_instance_percent = max_unhealthy_upgraded_instance_percent
        self.pause_time_between_batches = pause_time_between_batches
class RollingUpgradeProgressInfo(msrest.serialization.Model):
    """Information about the number of virtual machine instances in each upgrade state.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar successful_instance_count: The number of instances that have been successfully upgraded.
    :vartype successful_instance_count: int
    :ivar failed_instance_count: The number of instances that have failed to be upgraded
     successfully.
    :vartype failed_instance_count: int
    :ivar in_progress_instance_count: The number of instances that are currently being upgraded.
    :vartype in_progress_instance_count: int
    :ivar pending_instance_count: The number of instances that have not yet begun to be upgraded.
    :vartype pending_instance_count: int
    """

    # Instance counters are read-only progress data from the service.
    _validation = {
        'successful_instance_count': {'readonly': True},
        'failed_instance_count': {'readonly': True},
        'in_progress_instance_count': {'readonly': True},
        'pending_instance_count': {'readonly': True},
    }

    _attribute_map = {
        'successful_instance_count': {'key': 'successfulInstanceCount', 'type': 'int'},
        'failed_instance_count': {'key': 'failedInstanceCount', 'type': 'int'},
        'in_progress_instance_count': {'key': 'inProgressInstanceCount', 'type': 'int'},
        'pending_instance_count': {'key': 'pendingInstanceCount', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeProgressInfo, self).__init__(**kwargs)
        self.successful_instance_count = None
        self.failed_instance_count = None
        self.in_progress_instance_count = None
        self.pending_instance_count = None
class RollingUpgradeRunningStatus(msrest.serialization.Model):
    """Information about the current running state of the overall upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Code indicating the current status of the upgrade. Possible values include:
     "RollingForward", "Cancelled", "Completed", "Faulted".
    :vartype code: str or ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradeStatusCode
    :ivar start_time: Start time of the upgrade.
    :vartype start_time: ~datetime.datetime
    :ivar last_action: The last action performed on the rolling upgrade. Possible values include:
     "Start", "Cancel".
    :vartype last_action: str or ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradeActionType
    :ivar last_action_time: Last action time of the upgrade.
    :vartype last_action_time: ~datetime.datetime
    """

    # Server-populated; timestamps deserialize from ISO-8601 wire format.
    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'last_action': {'readonly': True},
        'last_action_time': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_action': {'key': 'lastAction', 'type': 'str'},
        'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeRunningStatus, self).__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.last_action = None
        self.last_action_time = None
class RollingUpgradeStatusInfo(Resource):
    """The status of the latest virtual machine scale set rolling upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar policy: The rolling upgrade policies applied for this upgrade.
    :vartype policy: ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradePolicy
    :ivar running_status: Information about the current running state of the overall upgrade.
    :vartype running_status: ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradeRunningStatus
    :ivar progress: Information about the number of virtual machine instances in each upgrade
     state.
    :vartype progress: ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradeProgressInfo
    :ivar error: Error details for this upgrade, if there are any.
    :vartype error: ~azure.mgmt.compute.v2019_03_01.models.ApiError
    """

    # Only location (required) and tags are caller-settable; everything else
    # is read-only status reported by the service.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'policy': {'readonly': True},
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
    }

    # 'properties.*' keys are nested under the resource's "properties"
    # envelope on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'policy': {'key': 'properties.policy', 'type': 'RollingUpgradePolicy'},
        'running_status': {'key': 'properties.runningStatus', 'type': 'RollingUpgradeRunningStatus'},
        'progress': {'key': 'properties.progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'properties.error', 'type': 'ApiError'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(RollingUpgradeStatusInfo, self).__init__(location=location, tags=tags, **kwargs)
        # Read-only status fields; populated on deserialization.
        self.policy = None
        self.running_status = None
        self.progress = None
        self.error = None
class RunCommandDocumentBase(msrest.serialization.Model):
    """Describes the properties of a Run Command metadata.

    All required parameters must be populated in order to send to Azure.

    :param schema: Required. The VM run command schema.
    :type schema: str
    :param id: Required. The VM run command id.
    :type id: str
    :param os_type: Required. The Operating System type. Possible values include: "Windows",
     "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param label: Required. The VM run command label.
    :type label: str
    :param description: Required. The VM run command description.
    :type description: str
    """

    # Every metadata field is mandatory.
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
    }

    # Note the '$schema' wire key: the leading '$' exists only on the wire.
    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        **kwargs
    ):
        # 'id' shadows the builtin, but the name is fixed by the generated API.
        super(RunCommandDocumentBase, self).__init__(**kwargs)
        self.schema = schema
        self.id = id
        self.os_type = os_type
        self.label = label
        self.description = description
class RunCommandDocument(RunCommandDocumentBase):
    """Describes the properties of a Run Command.

    All required parameters must be populated in order to send to Azure.

    :param schema: Required. The VM run command schema.
    :type schema: str
    :param id: Required. The VM run command id.
    :type id: str
    :param os_type: Required. The Operating System type. Possible values include: "Windows",
     "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param label: Required. The VM run command label.
    :type label: str
    :param description: Required. The VM run command description.
    :type description: str
    :param script: Required. The script to be executed.
    :type script: list[str]
    :param parameters: The parameters used by the script.
    :type parameters: list[~azure.mgmt.compute.v2019_03_01.models.RunCommandParameterDefinition]
    """

    # Extends the base validation with the required 'script' field;
    # 'parameters' stays optional.
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
        'script': {'required': True},
    }

    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandParameterDefinition]'},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        script: List[str],
        parameters: Optional[List["RunCommandParameterDefinition"]] = None,
        **kwargs
    ):
        # Metadata fields are handled by RunCommandDocumentBase; only the
        # script body and its parameter definitions are stored here.
        super(RunCommandDocument, self).__init__(schema=schema, id=id, os_type=os_type, label=label, description=description, **kwargs)
        self.script = script
        self.parameters = parameters
class RunCommandInput(msrest.serialization.Model):
    """Capture Virtual Machine parameters.

    All required parameters must be populated in order to send to Azure.

    :param command_id: Required. The run command id.
    :type command_id: str
    :param script: Optional. The script to be executed. When this value is given, the given script
     will override the default script of the command.
    :type script: list[str]
    :param parameters: The run command parameters.
    :type parameters: list[~azure.mgmt.compute.v2019_03_01.models.RunCommandInputParameter]
    """

    # Only the command id is mandatory; script/parameters are optional
    # overrides for the command's defaults.
    _validation = {
        'command_id': {'required': True},
    }

    _attribute_map = {
        'command_id': {'key': 'commandId', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandInputParameter]'},
    }

    def __init__(self, *, command_id: str, script: Optional[List[str]] = None,
                 parameters: Optional[List["RunCommandInputParameter"]] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.command_id = command_id
        self.script = script
        self.parameters = parameters
class RunCommandInputParameter(msrest.serialization.Model):
    """Describes the properties of a run command parameter.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The run command parameter name.
    :type name: str
    :param value: Required. The run command parameter value.
    :type value: str
    """

    # A parameter is a mandatory name/value pair.
    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: str, value: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class RunCommandListResult(msrest.serialization.Model):
    """The List Virtual Machine operation response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of virtual machine run commands.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.RunCommandDocumentBase]
    :param next_link: The uri to fetch the next page of run commands. Call ListNext() with this to
     fetch the next page of run commands.
    :type next_link: str
    """

    # 'next_link' is absent on the final page of results.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[RunCommandDocumentBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["RunCommandDocumentBase"],
                 next_link: Optional[str] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class RunCommandParameterDefinition(msrest.serialization.Model):
    """Definition (name, type, default, requiredness) of a run command parameter.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The run command parameter name.
    :type name: str
    :param type: Required. The run command parameter type.
    :type type: str
    :param default_value: The run command parameter default value.
    :type default_value: str
    :param required: Whether the run command parameter is required.
    :type required: bool
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'default_value': {'key': 'defaultValue', 'type': 'str'},
        'required': {'key': 'required', 'type': 'bool'},
    }

    # NOTE: 'type' shadows the builtin but is fixed by the wire contract.
    def __init__(self, *, name: str, type: str, default_value: Optional[str] = None,
                 required: Optional[bool] = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.type = type
        self.default_value = default_value
        self.required = required
class RunCommandResult(msrest.serialization.Model):
    """Result of a run command operation.

    :param value: Run command operation response.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, value: Optional[List["InstanceViewStatus"]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
class ScaleInPolicy(msrest.serialization.Model):
    """Describes a scale-in policy for a virtual machine scale set.

    :param rules: The rules to be followed when scaling-in a virtual machine scale set.
     :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **Default** When a
     virtual machine scale set is scaled in, the scale set will first be balanced across zones if
     it is a zonal scale set, then across Fault Domains as far as possible; within each Fault
     Domain the newest unprotected virtual machines are removed first.
     :code:`<br>`:code:`<br>` **OldestVM** The oldest virtual machines not protected from
     scale-in are chosen for removal (balanced across zones first for zonal scale sets).
     :code:`<br>`:code:`<br>` **NewestVM** The newest virtual machines not protected from
     scale-in are chosen for removal (balanced across zones first for zonal scale sets).
     :code:`<br>`:code:`<br>`.
    :type rules: list[str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetScaleInRules]
    """

    # Serialized as a plain string list; enum values are coerced to str.
    _attribute_map = {
        'rules': {'key': 'rules', 'type': '[str]'},
    }

    def __init__(self, *, rules: Optional[List[Union[str, "VirtualMachineScaleSetScaleInRules"]]] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.rules = rules
class ScheduledEventsProfile(msrest.serialization.Model):
    """Scheduled events configuration for a virtual machine.

    :param terminate_notification_profile: Specifies Terminate Scheduled Event related
     configurations.
    :type terminate_notification_profile:
     ~azure.mgmt.compute.v2019_03_01.models.TerminateNotificationProfile
    """

    _attribute_map = {
        'terminate_notification_profile': {'key': 'terminateNotificationProfile', 'type': 'TerminateNotificationProfile'},
    }

    def __init__(self, *, terminate_notification_profile: Optional["TerminateNotificationProfile"] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.terminate_notification_profile = terminate_notification_profile
class Sku(msrest.serialization.Model):
    """Describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the
    hardware the scale set is currently on, you need to deallocate the VMs in the scale set before
    you modify the SKU name.

    :param name: The sku name.
    :type name: str
    :param tier: Specifies the tier of virtual machines in a scale set.:code:`<br />`:code:`<br />`
     Possible Values::code:`<br />`:code:`<br />` **Standard**\ :code:`<br />`:code:`<br />`
     **Basic**.
    :type tier: str
    :param capacity: Specifies the number of virtual machines in the scale set.
    :type capacity: long
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'long'},
    }

    def __init__(self, *, name: Optional[str] = None, tier: Optional[str] = None,
                 capacity: Optional[int] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.tier = tier
        self.capacity = capacity
class Snapshot(Resource):
    """Snapshot resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar managed_by: Unused. Always Null.
    :vartype managed_by: str
    :param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.SnapshotSku
    :ivar time_created: The time when the disk was created.
    :vartype time_created: ~datetime.datetime
    :param os_type: The Operating System type. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
     disks only. Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGeneration
    :param creation_data: Disk source information. CreationData information cannot be changed
     after the disk has been created.
    :type creation_data: ~azure.mgmt.compute.v2019_03_01.models.CreationData
    :param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
     indicates the size of the disk to create. If this field is present for updates or creation
     with other options, it indicates a resize. Resizes are only allowed if the disk is not
     attached to a running VM, and can only increase the disk's size.
    :type disk_size_gb: int
    :ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
    :vartype disk_size_bytes: long
    :ivar unique_id: Unique Guid identifying the resource.
    :vartype unique_id: str
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
     Encryption, can contain multiple encryption settings per disk or snapshot.
    :type encryption_settings_collection:
     ~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsCollection
    :ivar provisioning_state: The disk provisioning state.
    :vartype provisioning_state: str
    :param incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk
     occupy less space than full snapshots and can be diffed.
    :type incremental: bool
    """

    # Server-populated fields are marked readonly and are never serialized out.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'managed_by': {'readonly': True},
        'time_created': {'readonly': True},
        'disk_size_bytes': {'readonly': True},
        'unique_id': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed_by': {'key': 'managedBy', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'SnapshotSku'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
        'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'incremental': {'key': 'properties.incremental', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["SnapshotSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
        creation_data: Optional["CreationData"] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        incremental: Optional[bool] = None,
        **kwargs
    ) -> None:
        # Resource handles the common location/tags fields.
        super().__init__(location=location, tags=tags, **kwargs)
        # Readonly fields start as None and are filled by deserialization.
        self.managed_by = None
        self.sku = sku
        self.time_created = None
        self.os_type = os_type
        self.hyper_v_generation = hyper_v_generation
        self.creation_data = creation_data
        self.disk_size_gb = disk_size_gb
        self.disk_size_bytes = None
        self.unique_id = None
        self.encryption_settings_collection = encryption_settings_collection
        self.provisioning_state = None
        self.incremental = incremental
class SnapshotList(msrest.serialization.Model):
    """Response of the List Snapshots operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of snapshots.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.Snapshot]
    :param next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to
     fetch the next page of snapshots.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Snapshot]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["Snapshot"], next_link: Optional[str] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SnapshotSku(msrest.serialization.Model):
    """The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
     "Standard_ZRS".
    :type name: str or ~azure.mgmt.compute.v2019_03_01.models.SnapshotStorageAccountTypes
    :ivar tier: The sku tier.
    :vartype tier: str
    """

    # 'tier' is server-assigned and never sent on requests.
    _validation = {
        'tier': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[Union[str, "SnapshotStorageAccountTypes"]] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.tier = None
class SnapshotUpdate(msrest.serialization.Model):
    """Snapshot update resource.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.SnapshotSku
    :param os_type: the Operating System type. Possible values include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
     indicates the size of the disk to create. If this field is present for updates or creation
     with other options, it indicates a resize. Resizes are only allowed if the disk is not
     attached to a running VM, and can only increase the disk's size.
    :type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
     Encryption, can contain multiple encryption settings per disk or snapshot.
    :type encryption_settings_collection:
     ~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsCollection
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'SnapshotSku'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["SnapshotSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.tags = tags
        self.sku = sku
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
class SourceVault(msrest.serialization.Model):
    """The vault id is an Azure Resource Manager Resource id in the form
    /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}.

    :param id: Resource Id.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    # NOTE: 'id' shadows the builtin but is fixed by the wire contract.
    def __init__(self, *, id: Optional[str] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
class SshConfiguration(msrest.serialization.Model):
    """SSH configuration for Linux based VMs running on Azure.

    :param public_keys: The list of SSH public keys used to authenticate with linux based VMs.
    :type public_keys: list[~azure.mgmt.compute.v2019_03_01.models.SshPublicKey]
    """

    _attribute_map = {
        'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'},
    }

    def __init__(self, *, public_keys: Optional[List["SshPublicKey"]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
    """Contains information about SSH certificate public key and the path on the Linux VM where
    the public key is placed.

    :param path: Specifies the full path on the created VM where ssh public key is stored. If the
     file already exists, the specified key is appended to the file. Example:
     /home/user/.ssh/authorized_keys.
    :type path: str
    :param key_data: SSH public key certificate used to authenticate with the VM through ssh. The
     key needs to be at least 2048-bit and in ssh-rsa format. :code:`<br>`:code:`<br>` For
     creating ssh keys, see `Create SSH keys on Linux and Mac for Linux VMs in Azure
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-
     keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json>`_.
    :type key_data: str
    """

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(self, *, path: Optional[str] = None, key_data: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.path = path
        self.key_data = key_data
class StorageProfile(msrest.serialization.Model):
    """Specifies the storage settings for the virtual machine disks.

    :param image_reference: Specifies information about the image to use. You can specify
     information about platform images, marketplace images, or virtual machine images. This
     element is required when you want to use a platform image, marketplace image, or virtual
     machine image, but is not used in other creation operations.
    :type image_reference: ~azure.mgmt.compute.v2019_03_01.models.ImageReference
    :param os_disk: Specifies information about the operating system disk used by the virtual
     machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and
     VHDs for Azure virtual machines <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-
     windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.
    :type os_disk: ~azure.mgmt.compute.v2019_03_01.models.OSDisk
    :param data_disks: Specifies the parameters that are used to add a data disk to a virtual
     machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and
     VHDs for Azure virtual machines <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-
     windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.
    :type data_disks: list[~azure.mgmt.compute.v2019_03_01.models.DataDisk]
    """

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(self, *, image_reference: Optional["ImageReference"] = None,
                 os_disk: Optional["OSDisk"] = None,
                 data_disks: Optional[List["DataDisk"]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class SubResourceReadOnly(msrest.serialization.Model):
    """A sub-resource reference whose id is populated only by the server.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Filled in by deserialization; never client-set.
        self.id = None
class TargetRegion(msrest.serialization.Model):
    """Describes the target region information.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the region.
    :type name: str
    :param regional_replica_count: The number of replicas of the Image Version to be created per
     region. This property is updatable.
    :type regional_replica_count: int
    :param storage_account_type: Specifies the storage account type to be used to store the
     image. This property is not updatable. Possible values include: "Standard_LRS",
     "Standard_ZRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountType
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'regional_replica_count': {'key': 'regionalReplicaCount', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(self, *, name: str, regional_replica_count: Optional[int] = None,
                 storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.regional_replica_count = regional_replica_count
        self.storage_account_type = storage_account_type
class TerminateNotificationProfile(msrest.serialization.Model):
    """Configuration of the Terminate Scheduled Event notification.

    :param not_before_timeout: Configurable length of time a Virtual Machine being deleted will
     have to potentially approve the Terminate Scheduled Event before the event is auto approved
     (timed out). The configuration must be specified in ISO 8601 format, the default value is 5
     minutes (PT5M).
    :type not_before_timeout: str
    :param enable: Specifies whether the Terminate Scheduled event is enabled or disabled.
    :type enable: bool
    """

    _attribute_map = {
        'not_before_timeout': {'key': 'notBeforeTimeout', 'type': 'str'},
        'enable': {'key': 'enable', 'type': 'bool'},
    }

    def __init__(self, *, not_before_timeout: Optional[str] = None,
                 enable: Optional[bool] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.not_before_timeout = not_before_timeout
        self.enable = enable
class ThrottledRequestsInput(LogAnalyticsInputBase):
    """Api request input for LogAnalytics getThrottledRequests Api.

    All required parameters must be populated in order to send to Azure.

    :param blob_container_sas_uri: Required. SAS Uri of the logging blob container to which
     LogAnalytics Api writes output logs to.
    :type blob_container_sas_uri: str
    :param from_time: Required. From time of the query.
    :type from_time: ~datetime.datetime
    :param to_time: Required. To time of the query.
    :type to_time: ~datetime.datetime
    :param group_by_throttle_policy: Group query result by Throttle Policy applied.
    :type group_by_throttle_policy: bool
    :param group_by_operation_name: Group query result by Operation Name.
    :type group_by_operation_name: bool
    :param group_by_resource_name: Group query result by Resource Name.
    :type group_by_resource_name: bool
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        **kwargs
    ) -> None:
        # All attribute assignment happens in LogAnalyticsInputBase; this subclass
        # only narrows validation/serialization metadata.
        super().__init__(
            blob_container_sas_uri=blob_container_sas_uri,
            from_time=from_time,
            to_time=to_time,
            group_by_throttle_policy=group_by_throttle_policy,
            group_by_operation_name=group_by_operation_name,
            group_by_resource_name=group_by_resource_name,
            **kwargs)
class UpgradeOperationHistoricalStatusInfo(msrest.serialization.Model):
    """Virtual Machine Scale Set OS Upgrade History operation response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar properties: Information about the properties of the upgrade operation.
    :vartype properties:
     ~azure.mgmt.compute.v2019_03_01.models.UpgradeOperationHistoricalStatusInfoProperties
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    """

    # Everything here is server-populated; nothing is client-settable.
    _validation = {
        'properties': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'UpgradeOperationHistoricalStatusInfoProperties'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.properties = None
        self.type = None
        self.location = None
class UpgradeOperationHistoricalStatusInfoProperties(msrest.serialization.Model):
    """Describes each OS upgrade on the Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar running_status: Information about the overall status of the upgrade operation.
    :vartype running_status: ~azure.mgmt.compute.v2019_03_01.models.UpgradeOperationHistoryStatus
    :ivar progress: Counts of the VMs in each state.
    :vartype progress: ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradeProgressInfo
    :ivar error: Error Details for this upgrade if there are any.
    :vartype error: ~azure.mgmt.compute.v2019_03_01.models.ApiError
    :ivar started_by: Invoker of the Upgrade Operation. Possible values include: "Unknown",
     "User", "Platform".
    :vartype started_by: str or ~azure.mgmt.compute.v2019_03_01.models.UpgradeOperationInvoker
    :ivar target_image_reference: Image Reference details.
    :vartype target_image_reference: ~azure.mgmt.compute.v2019_03_01.models.ImageReference
    :ivar rollback_info: Information about OS rollback if performed.
    :vartype rollback_info: ~azure.mgmt.compute.v2019_03_01.models.RollbackStatusInfo
    """

    # Everything here is server-populated; nothing is client-settable.
    _validation = {
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
        'started_by': {'readonly': True},
        'target_image_reference': {'readonly': True},
        'rollback_info': {'readonly': True},
    }

    _attribute_map = {
        'running_status': {'key': 'runningStatus', 'type': 'UpgradeOperationHistoryStatus'},
        'progress': {'key': 'progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'error', 'type': 'ApiError'},
        'started_by': {'key': 'startedBy', 'type': 'str'},
        'target_image_reference': {'key': 'targetImageReference', 'type': 'ImageReference'},
        'rollback_info': {'key': 'rollbackInfo', 'type': 'RollbackStatusInfo'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.running_status = None
        self.progress = None
        self.error = None
        self.started_by = None
        self.target_image_reference = None
        self.rollback_info = None
class UpgradeOperationHistoryStatus(msrest.serialization.Model):
    """Information about the current running state of the overall upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Code indicating the current status of the upgrade. Possible values include:
     "RollingForward", "Cancelled", "Completed", "Faulted".
    :vartype code: str or ~azure.mgmt.compute.v2019_03_01.models.UpgradeState
    :ivar start_time: Start time of the upgrade.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: End time of the upgrade.
    :vartype end_time: ~datetime.datetime
    """

    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.end_time = None
class UpgradePolicy(msrest.serialization.Model):
    """Describes an upgrade policy - automatic, manual, or rolling.

    :param mode: Specifies the mode of an upgrade to virtual machines in the scale
     set.:code:`<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual**
     - You  control the application of updates to virtual machines in the scale set. You do this
     by using the manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual
     machines in the scale set are  automatically updated at the same time. Possible values
     include: "Automatic", "Manual", "Rolling".
    :type mode: str or ~azure.mgmt.compute.v2019_03_01.models.UpgradeMode
    :param rolling_upgrade_policy: The configuration parameters used while performing a rolling
     upgrade.
    :type rolling_upgrade_policy: ~azure.mgmt.compute.v2019_03_01.models.RollingUpgradePolicy
    :param automatic_os_upgrade_policy: Configuration parameters used for performing automatic OS
     Upgrade.
    :type automatic_os_upgrade_policy:
     ~azure.mgmt.compute.v2019_03_01.models.AutomaticOSUpgradePolicy
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'},
        'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'},
    }

    def __init__(self, *, mode: Optional[Union[str, "UpgradeMode"]] = None,
                 rolling_upgrade_policy: Optional["RollingUpgradePolicy"] = None,
                 automatic_os_upgrade_policy: Optional["AutomaticOSUpgradePolicy"] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mode = mode
        self.rolling_upgrade_policy = rolling_upgrade_policy
        self.automatic_os_upgrade_policy = automatic_os_upgrade_policy
class Usage(msrest.serialization.Model):
    """Describes Compute Resource Usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar unit: Required. An enum describing the unit of usage measurement. Default value:
     "Count".
    :vartype unit: str
    :param current_value: Required. The current usage of the resource.
    :type current_value: int
    :param limit: Required. The maximum permitted usage of the resource.
    :type limit: long
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.compute.v2019_03_01.models.UsageName
    """

    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # 'unit' is a constant on the wire, so it is a class attribute rather than
    # an __init__ parameter.
    unit = "Count"

    def __init__(self, *, current_value: int, limit: int, name: "UsageName", **kwargs) -> None:
        super().__init__(**kwargs)
        self.current_value = current_value
        self.limit = limit
        self.name = name
class UsageName(msrest.serialization.Model):
    """The Usage Names.

    :param value: The name of the resource.
    :type value: str
    :param localized_value: The localized name of the resource.
    :type localized_value: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class UserArtifactManage(msrest.serialization.Model):
    """Install/remove/update commands for a gallery application artifact.

    All required parameters must be populated in order to send to Azure.

    :param install: Required. Required. The path and arguments to install the gallery
     application. This is limited to 4096 characters.
    :type install: str
    :param remove: Required. Required. The path and arguments to remove the gallery application.
     This is limited to 4096 characters.
    :type remove: str
    :param update: Optional. The path and arguments to update the gallery application. If not
     present, then update operation will invoke remove command on the previous version and
     install command on the current version of the gallery application. This is limited to 4096
     characters.
    :type update: str
    """

    _validation = {
        'install': {'required': True},
        'remove': {'required': True},
    }

    _attribute_map = {
        'install': {'key': 'install', 'type': 'str'},
        'remove': {'key': 'remove', 'type': 'str'},
        'update': {'key': 'update', 'type': 'str'},
    }

    def __init__(self, *, install: str, remove: str, update: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.install = install
        self.remove = remove
        self.update = update
class UserArtifactSource(msrest.serialization.Model):
    """The source image from which the Image Version is going to be created.

    All required parameters must be populated in order to send to Azure.

    :param media_link: Required. Required. The mediaLink of the artifact, must be a readable
     storage page blob.
    :type media_link: str
    :param default_configuration_link: Optional. The defaultConfigurationLink of the artifact,
     must be a readable storage page blob.
    :type default_configuration_link: str
    """

    _validation = {
        'media_link': {'required': True},
    }

    _attribute_map = {
        'media_link': {'key': 'mediaLink', 'type': 'str'},
        'default_configuration_link': {'key': 'defaultConfigurationLink', 'type': 'str'},
    }

    def __init__(self, *, media_link: str, default_configuration_link: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.media_link = media_link
        self.default_configuration_link = default_configuration_link
class UserAssignedIdentitiesValue(msrest.serialization.Model):
    """Principal/client id pair of a user assigned identity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    # Both ids are server-assigned.
    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.principal_id = None
        self.client_id = None
class VaultCertificate(msrest.serialization.Model):
    """A single certificate reference in a Key Vault, and where the certificate should reside on the VM.

    :param certificate_url: The URL of a certificate that has been uploaded to Key Vault as a
     secret (see `Add a key or secret to the key vault
     <https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add>`_). The certificate
     must be the Base64 encoding of a UTF-8 JSON object of the form
     {"data": "<Base64-encoded-certificate>", "dataType": "pfx", "password": "<pfx-file-password>"}.
    :type certificate_url: str
    :param certificate_store: For Windows VMs, the certificate store on the Virtual Machine to
     which the certificate is added (implicitly in the LocalMachine account). For Linux VMs, the
     certificate file is placed under /var/lib/waagent as <UppercaseThumbprint>.crt for the X509
     certificate and <UppercaseThumbprint>.prv for the private key, both .pem formatted.
    :type certificate_store: str
    """

    _attribute_map = {
        "certificate_url": {"key": "certificateUrl", "type": "str"},
        "certificate_store": {"key": "certificateStore", "type": "str"},
    }

    def __init__(self, *, certificate_url: Optional[str] = None, certificate_store: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.certificate_store = certificate_store
        self.certificate_url = certificate_url
class VaultSecretGroup(msrest.serialization.Model):
    """A set of certificates which are all in the same Key Vault.

    :param source_vault: The relative URL of the Key Vault containing all of the certificates in
     VaultCertificates.
    :type source_vault: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param vault_certificates: The list of key vault references in SourceVault which contain
     certificates.
    :type vault_certificates: list[~azure.mgmt.compute.v2019_03_01.models.VaultCertificate]
    """

    _attribute_map = {
        "source_vault": {"key": "sourceVault", "type": "SubResource"},
        "vault_certificates": {"key": "vaultCertificates", "type": "[VaultCertificate]"},
    }

    def __init__(
        self,
        *,
        source_vault: Optional["SubResource"] = None,
        vault_certificates: Optional[List["VaultCertificate"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.vault_certificates = vault_certificates
        self.source_vault = source_vault
class VirtualHardDisk(msrest.serialization.Model):
    """Describes the uri of a disk.

    :param uri: Specifies the virtual hard disk's uri.
    :type uri: str
    """

    _attribute_map = {"uri": {"key": "uri", "type": "str"}}

    def __init__(self, *, uri: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.uri = uri
class VirtualMachine(Resource):
    """Describes a Virtual Machine.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param plan: Purchase plan of the marketplace image used to create the VM; only used for
     marketplace images, which must first be enabled for programmatic use in the Azure portal.
    :type plan: ~azure.mgmt.compute.v2019_03_01.models.Plan
    :ivar resources: The virtual machine child extension resources.
    :vartype resources: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtension]
    :param identity: The identity of the virtual machine, if configured.
    :type identity: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineIdentity
    :param zones: The virtual machine zones.
    :type zones: list[str]
    :param hardware_profile: Hardware settings for the virtual machine.
    :type hardware_profile: ~azure.mgmt.compute.v2019_03_01.models.HardwareProfile
    :param storage_profile: Storage settings for the virtual machine disks.
    :type storage_profile: ~azure.mgmt.compute.v2019_03_01.models.StorageProfile
    :param additional_capabilities: Additional capabilities enabled or disabled on the VM.
    :type additional_capabilities: ~azure.mgmt.compute.v2019_03_01.models.AdditionalCapabilities
    :param os_profile: Operating system settings for the virtual machine.
    :type os_profile: ~azure.mgmt.compute.v2019_03_01.models.OSProfile
    :param network_profile: Network interfaces of the virtual machine.
    :type network_profile: ~azure.mgmt.compute.v2019_03_01.models.NetworkProfile
    :param diagnostics_profile: Boot diagnostic settings state. Minimum api-version: 2015-06-15.
    :type diagnostics_profile: ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param availability_set: The availability set the VM should be assigned to; can only be set at
     VM creation time and cannot exist along with a non-null
     properties.virtualMachineScaleSet reference.
    :type availability_set: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param virtual_machine_scale_set: The virtual machine scale set the VM should be assigned to;
     can only be set at VM creation time and cannot exist along with a non-null
     properties.availabilitySet reference. Minimum api-version: 2019-03-01.
    :type virtual_machine_scale_set: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param proximity_placement_group: The proximity placement group the VM should be assigned to.
     Minimum api-version: 2018-04-01.
    :type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param priority: Priority for the virtual machine. Minimum api-version: 2019-03-01. Possible
     values include: "Regular", "Low", "Spot".
    :type priority: str or ~azure.mgmt.compute.v2019_03_01.models.VirtualMachinePriorityTypes
    :param eviction_policy: Eviction policy for the Azure Spot virtual machine; only 'Deallocate'
     is supported. Minimum api-version: 2019-03-01. Possible values include: "Deallocate",
     "Delete".
    :type eviction_policy: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineEvictionPolicyTypes
    :param billing_profile: Billing related details of an Azure Spot virtual machine. Minimum
     api-version: 2019-03-01.
    :type billing_profile: ~azure.mgmt.compute.v2019_03_01.models.BillingProfile
    :param host: The dedicated host the virtual machine resides in. Minimum api-version:
     2018-10-01.
    :type host: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The virtual machine instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineInstanceView
    :param license_type: Indicates the image or disk was licensed on-premises (Windows Server
     images only). Possible values: Windows_Client, Windows_Server. On update the value must match
     the initial value; it cannot be updated. Minimum api-version: 2015-06-15.
    :type license_type: str
    :ivar vm_id: The VM unique ID, a 128-bit identifier encoded and stored in all Azure IaaS VMs
     SMBIOS and readable using platform BIOS commands.
    :vartype vm_id: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "resources": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "instance_view": {"readonly": True},
        "vm_id": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "plan": {"key": "plan", "type": "Plan"},
        "resources": {"key": "resources", "type": "[VirtualMachineExtension]"},
        "identity": {"key": "identity", "type": "VirtualMachineIdentity"},
        "zones": {"key": "zones", "type": "[str]"},
        "hardware_profile": {"key": "properties.hardwareProfile", "type": "HardwareProfile"},
        "storage_profile": {"key": "properties.storageProfile", "type": "StorageProfile"},
        "additional_capabilities": {"key": "properties.additionalCapabilities", "type": "AdditionalCapabilities"},
        "os_profile": {"key": "properties.osProfile", "type": "OSProfile"},
        "network_profile": {"key": "properties.networkProfile", "type": "NetworkProfile"},
        "diagnostics_profile": {"key": "properties.diagnosticsProfile", "type": "DiagnosticsProfile"},
        "availability_set": {"key": "properties.availabilitySet", "type": "SubResource"},
        "virtual_machine_scale_set": {"key": "properties.virtualMachineScaleSet", "type": "SubResource"},
        "proximity_placement_group": {"key": "properties.proximityPlacementGroup", "type": "SubResource"},
        "priority": {"key": "properties.priority", "type": "str"},
        "eviction_policy": {"key": "properties.evictionPolicy", "type": "str"},
        "billing_profile": {"key": "properties.billingProfile", "type": "BillingProfile"},
        "host": {"key": "properties.host", "type": "SubResource"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "instance_view": {"key": "properties.instanceView", "type": "VirtualMachineInstanceView"},
        "license_type": {"key": "properties.licenseType", "type": "str"},
        "vm_id": {"key": "properties.vmId", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        # Caller-settable request properties.
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.license_type = license_type
        # Server-populated, read-only properties.
        self.resources = None
        self.provisioning_state = None
        self.instance_view = None
        self.vm_id = None
class VirtualMachineAgentInstanceView(msrest.serialization.Model):
    """The instance view of the VM Agent running on the virtual machine.

    :param vm_agent_version: The VM Agent full version.
    :type vm_agent_version: str
    :param extension_handlers: The virtual machine extension handler instance view.
    :type extension_handlers:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtensionHandlerInstanceView]
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        "vm_agent_version": {"key": "vmAgentVersion", "type": "str"},
        "extension_handlers": {"key": "extensionHandlers", "type": "[VirtualMachineExtensionHandlerInstanceView]"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
    }

    def __init__(
        self,
        *,
        vm_agent_version: Optional[str] = None,
        extension_handlers: Optional[List["VirtualMachineExtensionHandlerInstanceView"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.statuses = statuses
        self.extension_handlers = extension_handlers
        self.vm_agent_version = vm_agent_version
class VirtualMachineCaptureParameters(msrest.serialization.Model):
    """Capture Virtual Machine parameters.

    All required parameters must be populated in order to send to Azure.

    :param vhd_prefix: Required. The captured virtual hard disk's name prefix.
    :type vhd_prefix: str
    :param destination_container_name: Required. The destination container name.
    :type destination_container_name: str
    :param overwrite_vhds: Required. Whether to overwrite the destination virtual hard disk in
     case of conflict.
    :type overwrite_vhds: bool
    """

    # All three fields are mandatory on the request.
    _validation = {
        "vhd_prefix": {"required": True},
        "destination_container_name": {"required": True},
        "overwrite_vhds": {"required": True},
    }

    _attribute_map = {
        "vhd_prefix": {"key": "vhdPrefix", "type": "str"},
        "destination_container_name": {"key": "destinationContainerName", "type": "str"},
        "overwrite_vhds": {"key": "overwriteVhds", "type": "bool"},
    }

    def __init__(self, *, vhd_prefix: str, destination_container_name: str, overwrite_vhds: bool, **kwargs):
        super().__init__(**kwargs)
        self.overwrite_vhds = overwrite_vhds
        self.destination_container_name = destination_container_name
        self.vhd_prefix = vhd_prefix
class VirtualMachineCaptureResult(SubResource):
    """Output of virtual machine capture operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource Id.
    :type id: str
    :ivar schema: the schema of the captured virtual machine.
    :vartype schema: str
    :ivar content_version: the version of the content.
    :vartype content_version: str
    :ivar parameters: parameters of the captured virtual machine.
    :vartype parameters: object
    :ivar resources: a list of resource items of the captured virtual machine.
    :vartype resources: list[object]
    """

    # Everything except ``id`` is server-populated.
    _validation = {
        "schema": {"readonly": True},
        "content_version": {"readonly": True},
        "parameters": {"readonly": True},
        "resources": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "schema": {"key": "$schema", "type": "str"},
        "content_version": {"key": "contentVersion", "type": "str"},
        "parameters": {"key": "parameters", "type": "object"},
        "resources": {"key": "resources", "type": "[object]"},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        # Read-only; filled in by the service on deserialization.
        self.schema = None
        self.content_version = None
        self.parameters = None
        self.resources = None
class VirtualMachineExtension(Resource):
    """Describes a Virtual Machine Extension.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :type force_update_tag: str
    :param publisher: The name of the extension handler publisher.
    :type publisher: str
    :param type_properties_type: The type of the extension; an example is "CustomScriptExtension".
    :type type_properties_type: str
    :param type_handler_version: The version of the script handler.
    :type type_handler_version: str
    :param auto_upgrade_minor_version: Whether the extension should use a newer minor version if
     one is available at deployment time. Once deployed, minor versions are not upgraded unless
     redeployed, even when this property is true.
    :type auto_upgrade_minor_version: bool
    :param settings: Json formatted public settings for the extension.
    :type settings: object
    :param protected_settings: The extension can contain either protectedSettings or
     protectedSettingsFromKeyVault or no protected settings at all.
    :type protected_settings: object
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :param instance_view: The virtual machine extension instance view.
    :type instance_view: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtensionInstanceView
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.publisher", "type": "str"},
        "type_properties_type": {"key": "properties.type", "type": "str"},
        "type_handler_version": {"key": "properties.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"},
        "settings": {"key": "properties.settings", "type": "object"},
        "protected_settings": {"key": "properties.protectedSettings", "type": "object"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "instance_view": {"key": "properties.instanceView", "type": "VirtualMachineExtensionInstanceView"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        settings: Optional[object] = None,
        protected_settings: Optional[object] = None,
        instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        # NOTE: "type" on the wire maps to type_properties_type because "type" is
        # already taken by the top-level resource type.
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
        self.instance_view = instance_view
        # Read-only; filled in by the service.
        self.provisioning_state = None
class VirtualMachineExtensionHandlerInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine extension handler.

    :param type: The type of the extension; an example is "CustomScriptExtension".
    :type type: str
    :param type_handler_version: The version of the script handler.
    :type type_handler_version: str
    :param status: The extension handler status.
    :type status: ~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "type_handler_version": {"key": "typeHandlerVersion", "type": "str"},
        "status": {"key": "status", "type": "InstanceViewStatus"},
    }

    def __init__(
        self,
        *,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        status: Optional["InstanceViewStatus"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.status = status
        self.type_handler_version = type_handler_version
        self.type = type
class VirtualMachineExtensionImage(Resource):
    """Describes a Virtual Machine Extension Image.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param operating_system: The operating system this extension supports.
    :type operating_system: str
    :param compute_role: The type of role (IaaS or PaaS) this extension supports.
    :type compute_role: str
    :param handler_schema: The schema defined by the publisher, where extension consumers should
     provide settings in a matching schema.
    :type handler_schema: str
    :param vm_scale_set_enabled: Whether the extension can be used on xRP VMScaleSets. By default
     existing extensions are usable on scalesets, but a publisher may explicitly indicate the
     extension is only enabled for CRP VMs and not VMSS.
    :type vm_scale_set_enabled: bool
    :param supports_multiple_extensions: Whether the handler can support multiple extensions.
    :type supports_multiple_extensions: bool
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "operating_system": {"key": "properties.operatingSystem", "type": "str"},
        "compute_role": {"key": "properties.computeRole", "type": "str"},
        "handler_schema": {"key": "properties.handlerSchema", "type": "str"},
        "vm_scale_set_enabled": {"key": "properties.vmScaleSetEnabled", "type": "bool"},
        "supports_multiple_extensions": {"key": "properties.supportsMultipleExtensions", "type": "bool"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        operating_system: Optional[str] = None,
        compute_role: Optional[str] = None,
        handler_schema: Optional[str] = None,
        vm_scale_set_enabled: Optional[bool] = None,
        supports_multiple_extensions: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.supports_multiple_extensions = supports_multiple_extensions
        self.vm_scale_set_enabled = vm_scale_set_enabled
        self.handler_schema = handler_schema
        self.compute_role = compute_role
        self.operating_system = operating_system
class VirtualMachineExtensionInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine extension.

    :param name: The virtual machine extension name.
    :type name: str
    :param type: The type of the extension; an example is "CustomScriptExtension".
    :type type: str
    :param type_handler_version: The version of the script handler.
    :type type_handler_version: str
    :param substatuses: The resource status information.
    :type substatuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "type_handler_version": {"key": "typeHandlerVersion", "type": "str"},
        "substatuses": {"key": "substatuses", "type": "[InstanceViewStatus]"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        substatuses: Optional[List["InstanceViewStatus"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.statuses = statuses
        self.substatuses = substatuses
        self.type_handler_version = type_handler_version
        self.type = type
        self.name = name
class VirtualMachineExtensionsListResult(msrest.serialization.Model):
    """The List Extension operation response.

    :param value: The list of extensions.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtension]
    """

    _attribute_map = {"value": {"key": "value", "type": "[VirtualMachineExtension]"}}

    def __init__(self, *, value: Optional[List["VirtualMachineExtension"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineExtensionUpdate(UpdateResource):
    """Describes a Virtual Machine Extension.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :type force_update_tag: str
    :param publisher: The name of the extension handler publisher.
    :type publisher: str
    :param type: The type of the extension; an example is "CustomScriptExtension".
    :type type: str
    :param type_handler_version: The version of the script handler.
    :type type_handler_version: str
    :param auto_upgrade_minor_version: Whether the extension should use a newer minor version if
     one is available at deployment time. Once deployed, minor versions are not upgraded unless
     redeployed, even when this property is true.
    :type auto_upgrade_minor_version: bool
    :param settings: Json formatted public settings for the extension.
    :type settings: object
    :param protected_settings: The extension can contain either protectedSettings or
     protectedSettingsFromKeyVault or no protected settings at all.
    :type protected_settings: object
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.publisher", "type": "str"},
        "type": {"key": "properties.type", "type": "str"},
        "type_handler_version": {"key": "properties.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"},
        "settings": {"key": "properties.settings", "type": "object"},
        "protected_settings": {"key": "properties.protectedSettings", "type": "object"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        settings: Optional[object] = None,
        protected_settings: Optional[object] = None,
        **kwargs
    ):
        super().__init__(tags=tags, **kwargs)
        self.protected_settings = protected_settings
        self.settings = settings
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.type_handler_version = type_handler_version
        self.type = type
        self.publisher = publisher
        self.force_update_tag = force_update_tag
class VirtualMachineHealthStatus(msrest.serialization.Model):
    """The health status of the VM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: The health status information for the VM.
    :vartype status: ~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus
    """

    _validation = {"status": {"readonly": True}}

    _attribute_map = {"status": {"key": "status", "type": "InstanceViewStatus"}}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in by the service on deserialization.
        self.status = None
class VirtualMachineIdentity(msrest.serialization.Model):
    """Identity for the virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of virtual machine identity; only provided for a system
     assigned identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant id associated with the virtual machine; only provided for a system
     assigned identity.
    :vartype tenant_id: str
    :param type: The type of identity used for the virtual machine. 'SystemAssigned, UserAssigned'
     includes both an implicitly created identity and a set of user assigned identities; 'None'
     removes any identities. Possible values include: "SystemAssigned", "UserAssigned",
     "SystemAssigned, UserAssigned", "None".
    :type type: str or ~azure.mgmt.compute.v2019_03_01.models.ResourceIdentityType
    :param user_assigned_identities: The list of user identities associated with the Virtual
     Machine, keyed by ARM resource ids in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
    :type user_assigned_identities: dict[str,
     ~azure.mgmt.compute.v2019_03_01.models.UserAssignedIdentitiesValue]
    """

    _validation = {
        "principal_id": {"readonly": True},
        "tenant_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "tenant_id": {"key": "tenantId", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentitiesValue}"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "ResourceIdentityType"]] = None,
        user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.user_assigned_identities = user_assigned_identities
        # Read-only; filled in by the service for system assigned identities.
        self.principal_id = None
        self.tenant_id = None
class VirtualMachineImageResource(SubResource):
    """Identifies a virtual machine image resource.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource Id.
    :type id: str
    :param name: Required. The name of the resource.
    :type name: str
    :param location: Required. The supported Azure location of the resource.
    :type location: str
    :param tags: A set of tags assigned to the virtual machine. See `Using tags to
     organize your Azure resources
     <https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md>`_.
    :type tags: dict[str, str]
    """

    _validation = {'name': {'required': True}, 'location': {'required': True}}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, name: str, location: str, id: Optional[str] = None,
                 tags: Optional[Dict[str, str]] = None, **kwargs):
        # The base SubResource owns the ``id`` field.
        super(VirtualMachineImageResource, self).__init__(id=id, **kwargs)
        self.name = name
        self.location = location
        self.tags = tags
class VirtualMachineImage(VirtualMachineImageResource):
    """A virtual machine image, including its purchase plan and disk images.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource Id.
    :type id: str
    :param name: Required. The name of the resource.
    :type name: str
    :param location: Required. The supported Azure location of the resource.
    :type location: str
    :param tags: A set of tags assigned to the virtual machine.
    :type tags: dict[str, str]
    :param plan: Purchase context of any 3rd-party artifact through MarketPlace.
    :type plan: ~azure.mgmt.compute.v2019_03_01.models.PurchasePlan
    :param os_disk_image: The OS disk image information.
    :type os_disk_image: ~azure.mgmt.compute.v2019_03_01.models.OSDiskImage
    :param data_disk_images:
    :type data_disk_images: list[~azure.mgmt.compute.v2019_03_01.models.DataDiskImage]
    :param automatic_os_upgrade_properties: Automatic OS upgrade properties of the image.
    :type automatic_os_upgrade_properties:
     ~azure.mgmt.compute.v2019_03_01.models.AutomaticOSUpgradeProperties
    :param hyper_v_generation: The HyperVGeneration type. Possible values include:
     "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGenerationTypes
    """

    _validation = {'name': {'required': True}, 'location': {'required': True}}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
        'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
        'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
        'automatic_os_upgrade_properties': {'key': 'properties.automaticOSUpgradeProperties', 'type': 'AutomaticOSUpgradeProperties'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        location: str,
        id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["PurchasePlan"] = None,
        os_disk_image: Optional["OSDiskImage"] = None,
        data_disk_images: Optional[List["DataDiskImage"]] = None,
        automatic_os_upgrade_properties: Optional["AutomaticOSUpgradeProperties"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        # Identity fields (id/name/location/tags) are handled by the parent.
        super(VirtualMachineImage, self).__init__(
            id=id, name=name, location=location, tags=tags, **kwargs)
        self.plan = plan
        self.os_disk_image = os_disk_image
        self.data_disk_images = data_disk_images
        self.automatic_os_upgrade_properties = automatic_os_upgrade_properties
        self.hyper_v_generation = hyper_v_generation
class VirtualMachineInstanceView(msrest.serialization.Model):
    """Runtime (instance view) information for a virtual machine.

    :param platform_update_domain: The update domain of the virtual machine.
    :type platform_update_domain: int
    :param platform_fault_domain: The fault domain of the virtual machine.
    :type platform_fault_domain: int
    :param computer_name: The computer name assigned to the virtual machine.
    :type computer_name: str
    :param os_name: The Operating System running on the virtual machine.
    :type os_name: str
    :param os_version: The version of the Operating System running on the virtual machine.
    :type os_version: str
    :param hyper_v_generation: The HyperVGeneration type associated with a resource.
     Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2019_03_01.models.HyperVGenerationType
    :param rdp_thumb_print: The Remote desktop certificate thumbprint.
    :type rdp_thumb_print: str
    :param vm_agent: The VM Agent running on the virtual machine.
    :type vm_agent: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineAgentInstanceView
    :param maintenance_redeploy_status: The Maintenance Operation status on the virtual machine.
    :type maintenance_redeploy_status:
     ~azure.mgmt.compute.v2019_03_01.models.MaintenanceRedeployStatus
    :param disks: The virtual machine disk information.
    :type disks: list[~azure.mgmt.compute.v2019_03_01.models.DiskInstanceView]
    :param extensions: The extensions information.
    :type extensions:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtensionInstanceView]
    :param boot_diagnostics: Boot Diagnostics (console output / screenshot) state
     used to diagnose VM status.
    :type boot_diagnostics: ~azure.mgmt.compute.v2019_03_01.models.BootDiagnosticsInstanceView
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'os_name': {'key': 'osName', 'type': 'str'},
        'os_version': {'key': 'osVersion', 'type': 'str'},
        'hyper_v_generation': {'key': 'hyperVGeneration', 'type': 'str'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        computer_name: Optional[str] = None,
        os_name: Optional[str] = None,
        os_version: Optional[str] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationType"]] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(VirtualMachineInstanceView, self).__init__(**kwargs)
        # Placement information.
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        # Guest OS information.
        self.computer_name = computer_name
        self.os_name = os_name
        self.os_version = os_version
        self.hyper_v_generation = hyper_v_generation
        self.rdp_thumb_print = rdp_thumb_print
        # Agent, maintenance, disk and extension views.
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
class VirtualMachineListResult(msrest.serialization.Model):
    """Response for the List Virtual Machine operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of virtual machines.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachine]
    :param next_link: The URI of the next page of VMs; pass it to ListNext() to
     fetch the next page of Virtual Machines.
    :type next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachine]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachine"],
                 next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineReimageParameters(msrest.serialization.Model):
    """Parameters for reimaging a Virtual Machine. The OS disk is always reimaged.

    :param temp_disk: Whether to reimage the temp disk as well (default false).
     Only supported for VM/VMSS with an Ephemeral OS disk.
    :type temp_disk: bool
    """

    _attribute_map = {'temp_disk': {'key': 'tempDisk', 'type': 'bool'}}

    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        super(VirtualMachineReimageParameters, self).__init__(**kwargs)
        self.temp_disk = temp_disk
class VirtualMachineScaleSet(Resource):
    """A Virtual Machine Scale Set resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The virtual machine scale set sku.
    :type sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :param plan: Marketplace image information used to create the virtual machines;
     only used for marketplace images, which must first be enabled for programmatic use.
    :type plan: ~azure.mgmt.compute.v2019_03_01.models.Plan
    :param identity: The identity of the virtual machine scale set, if configured.
    :type identity: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetIdentity
    :param zones: The scale set zones; availability zones can only be set at creation time.
    :type zones: list[str]
    :param upgrade_policy: The upgrade policy.
    :type upgrade_policy: ~azure.mgmt.compute.v2019_03_01.models.UpgradePolicy
    :param automatic_repairs_policy: Policy for automatic repairs.
    :type automatic_repairs_policy: ~azure.mgmt.compute.v2019_03_01.models.AutomaticRepairsPolicy
    :param virtual_machine_profile: The virtual machine profile.
    :type virtual_machine_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVMProfile
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :param overprovision: Whether the scale set should be overprovisioned.
    :type overprovision: bool
    :param do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled,
     run extensions only on the requested number of VMs that are finally kept, not on
     the extra overprovisioned VMs.
    :type do_not_run_extensions_on_overprovisioned_v_ms: bool
    :ivar unique_id: The ID that uniquely identifies the scale set.
    :vartype unique_id: str
    :param single_placement_group: When true, limits the scale set to a single
     placement group of max size 100 virtual machines.
    :type single_placement_group: bool
    :param zone_balance: Whether to force strictly even VM distribution across zones
     in case of a zone outage.
    :type zone_balance: bool
    :param platform_fault_domain_count: Fault Domain count for each placement group.
    :type platform_fault_domain_count: int
    :param proximity_placement_group: The proximity placement group the scale set
     should be assigned to. Minimum api-version: 2018-04-01.
    :type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param additional_capabilities: Additional capabilities enabled or disabled on the
     VMs of the scale set, e.g. support for UltraSSD_LRS managed data disks.
    :type additional_capabilities: ~azure.mgmt.compute.v2019_03_01.models.AdditionalCapabilities
    :param scale_in_policy: The scale-in policy deciding which VMs are removed when
     the scale set is scaled in.
    :type scale_in_policy: ~azure.mgmt.compute.v2019_03_01.models.ScaleInPolicy
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        zones: Optional[List[str]] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        zone_balance: Optional[bool] = None,
        platform_fault_domain_count: Optional[int] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        **kwargs
    ):
        # Resource base handles location/tags; everything else is scale-set specific.
        super(VirtualMachineScaleSet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        # Server-populated (read-only) fields start out as None.
        self.provisioning_state = None
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.unique_id = None
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.proximity_placement_group = proximity_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
class VirtualMachineScaleSetDataDisk(msrest.serialization.Model):
    """A data disk of a virtual machine scale set.

    All required parameters must be populated in order to send to Azure.

    :param name: The disk name.
    :type name: str
    :param lun: Required. The logical unit number of the data disk; identifies the
     data disk within the VM and must be unique for each disk attached to a VM.
    :type lun: int
    :param caching: The caching requirements. Possible values are: **None**,
     **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for
     Premium storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param write_accelerator_enabled: Whether writeAccelerator should be enabled or
     disabled on the disk.
    :type write_accelerator_enabled: bool
    :param create_option: Required. The create option. Possible values include:
     "FromImage", "Empty", "Attach".
    :type create_option: str or ~azure.mgmt.compute.v2019_03_01.models.DiskCreateOptionTypes
    :param disk_size_gb: The size of an empty data disk in gigabytes; can override
     the size in the VM image. Cannot be larger than 1023 GB.
    :type disk_size_gb: int
    :param managed_disk: The managed disk parameters.
    :type managed_disk:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    """

    _validation = {'lun': {'required': True}, 'create_option': {'required': True}}

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }

    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetDataDisk, self).__init__(**kwargs)
        self.name = name
        self.lun = lun
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
class VirtualMachineScaleSetExtension(SubResourceReadOnly):
    """An extension attached to a Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :param name: The name of the extension.
    :type name: str
    :param force_update_tag: If provided and different from the previous value,
     forces the extension handler to update even when the configuration is unchanged.
    :type force_update_tag: str
    :param publisher: The name of the extension handler publisher.
    :type publisher: str
    :param type: The type of the extension; e.g. "CustomScriptExtension".
    :type type: str
    :param type_handler_version: The version of the script handler.
    :type type_handler_version: str
    :param auto_upgrade_minor_version: Whether the extension should use a newer
     minor version if one is available at deployment time. Once deployed the
     extension will not upgrade minor versions unless redeployed.
    :type auto_upgrade_minor_version: bool
    :param settings: Json formatted public settings for the extension.
    :type settings: object
    :param protected_settings: Either protectedSettings or
     protectedSettingsFromKeyVault, or no protected settings at all.
    :type protected_settings: object
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :param provision_after_extensions: Names of extensions after which this
     extension needs to be provisioned.
    :type provision_after_extensions: list[str]
    """

    _validation = {
        'id': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        settings: Optional[object] = None,
        protected_settings: Optional[object] = None,
        provision_after_extensions: Optional[List[str]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetExtension, self).__init__(**kwargs)
        self.name = name
        self.force_update_tag = force_update_tag
        # Handler identification.
        self.publisher = publisher
        self.type = type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
        # Read-only, server-populated.
        self.provisioning_state = None
        self.provision_after_extensions = provision_after_extensions
class VirtualMachineScaleSetExtensionListResult(msrest.serialization.Model):
    """Response for the List VM scale set extension operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The list of VM scale set extensions.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
    :param next_link: The uri of the next page of VM scale set extensions; pass it
     to ListNext() to fetch the next page.
    :type next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetExtension]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetExtension"],
                 next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetExtensionListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetExtensionProfile(msrest.serialization.Model):
    """The extension profile of a virtual machine scale set.

    :param extensions: The scale set's child extension resources.
    :type extensions: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
    """

    _attribute_map = {
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetExtension]'},
    }

    def __init__(self, *,
                 extensions: Optional[List["VirtualMachineScaleSetExtension"]] = None,
                 **kwargs):
        super(VirtualMachineScaleSetExtensionProfile, self).__init__(**kwargs)
        self.extensions = extensions
class VirtualMachineScaleSetIdentity(msrest.serialization.Model):
    """Managed identity configuration for a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of the scale set identity; only provided
     for a system assigned identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant id associated with the scale set; only provided for
     a system assigned identity.
    :vartype tenant_id: str
    :param type: The type of identity used for the scale set. 'SystemAssigned,
     UserAssigned' includes both an implicitly created identity and user assigned
     identities; 'None' removes all identities. Possible values include:
     "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
    :type type: str or ~azure.mgmt.compute.v2019_03_01.models.ResourceIdentityType
    :param user_assigned_identities: User identities associated with the scale set,
     keyed by ARM resource ids of the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
    :type user_assigned_identities: dict[str,
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue]
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue}'},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "ResourceIdentityType"]] = None,
        user_assigned_identities: Optional[Dict[str, "VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetIdentity, self).__init__(**kwargs)
        # principal_id / tenant_id are server-populated and start as None.
        self.principal_id = None
        self.tenant_id = None
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue(msrest.serialization.Model):
    """Principal/client ids of a user assigned identity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of the user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of the user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue, self).__init__(**kwargs)
        # Both fields are read-only and filled in by the service.
        self.principal_id = None
        self.client_id = None
class VirtualMachineScaleSetInstanceView(msrest.serialization.Model):
    """Instance view of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar virtual_machine: The instance view status summary for the scale set.
    :vartype virtual_machine:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetInstanceViewStatusesSummary
    :ivar extensions: The extensions information.
    :vartype extensions:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVMExtensionsSummary]
    :param statuses: The resource status information.
    :type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
    """

    _validation = {
        'virtual_machine': {'readonly': True},
        'extensions': {'readonly': True},
    }

    _attribute_map = {
        'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        super(VirtualMachineScaleSetInstanceView, self).__init__(**kwargs)
        # virtual_machine and extensions are server-populated (read-only).
        self.virtual_machine = None
        self.extensions = None
        self.statuses = statuses
class VirtualMachineScaleSetInstanceViewStatusesSummary(msrest.serialization.Model):
    """Instance view statuses summary for the virtual machines of a scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar statuses_summary: The extensions information.
    :vartype statuses_summary:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineStatusCodeCount]
    """

    _validation = {'statuses_summary': {'readonly': True}}

    _attribute_map = {
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetInstanceViewStatusesSummary, self).__init__(**kwargs)
        # Server-populated (read-only).
        self.statuses_summary = None
class VirtualMachineScaleSetIPConfiguration(SubResource):
    """An IP configuration of a virtual machine scale set network profile.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource Id.
    :type id: str
    :param name: Required. The IP configuration name.
    :type name: str
    :param subnet: The identifier of the subnet.
    :type subnet: ~azure.mgmt.compute.v2019_03_01.models.ApiEntityReference
    :param primary: The primary network interface, in case the virtual machine has
     more than 1 network interface.
    :type primary: bool
    :param public_ip_address_configuration: The publicIPAddressConfiguration.
    :type public_ip_address_configuration:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetPublicIPAddressConfiguration
    :param private_ip_address_version: Available from Api-Version 2017-03-30 onwards;
     whether the ipconfiguration is IPv4 or IPv6 (default IPv4). Possible values
     include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.compute.v2019_03_01.models.IPVersion
    :param application_gateway_backend_address_pools: References to backend address
     pools of application gateways. A scale set can reference pools of multiple
     application gateways; multiple scale sets cannot share one application gateway.
    :type application_gateway_backend_address_pools:
     list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param application_security_groups: References to application security groups.
    :type application_security_groups: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param load_balancer_backend_address_pools: References to backend address pools
     of load balancers. A scale set can reference pools of one public and one
     internal load balancer; multiple scale sets cannot share a load balancer.
    :type load_balancer_backend_address_pools:
     list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param load_balancer_inbound_nat_pools: References to inbound Nat pools of load
     balancers, subject to the same sharing constraints as the backend pools.
    :type load_balancer_inbound_nat_pools: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    """

    _validation = {'name': {'required': True}}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetPublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetPublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        # SubResource owns the ``id`` field.
        super(VirtualMachineScaleSetIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        # Backend-pool / security-group references.
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetIpTag(msrest.serialization.Model):
    """An IP tag attached to a scale set public IP address.

    :param ip_tag_type: The kind of IP tag. Example: FirstPartyUsage.
    :type ip_tag_type: str
    :param tag: The tag value associated with the public IP. Example: SQL, Storage etc.
    :type tag: str
    """

    # Maps Python attribute names to wire-format keys for msrest (de)serialization.
    _attribute_map = {
        'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetIpTag, self).__init__(**kwargs)
        self.tag = tag
        self.ip_tag_type = ip_tag_type
class VirtualMachineScaleSetListOSUpgradeHistory(msrest.serialization.Model):
    """Response for the List Virtual Machine Scale Set OS Upgrade History operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The OS upgrades that have been performed on the scale set.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.UpgradeOperationHistoricalStatusInfo]
    :param next_link: URI of the next page of upgrade history; pass it to ListNext() to
     continue paging.
    :type next_link: str
    """

    # msrest validation: 'value' must be present on serialization.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[UpgradeOperationHistoricalStatusInfo]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["UpgradeOperationHistoricalStatusInfo"], next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetListOSUpgradeHistory, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetListResult(msrest.serialization.Model):
    """Paged response for listing virtual machine scale sets.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The virtual machine scale sets in this page.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSet]
    :param next_link: URI of the next page of scale sets; pass it to ListNext() to
     continue paging.
    :type next_link: str
    """

    # msrest validation: 'value' must be present on serialization.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"], next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetListSkusResult(msrest.serialization.Model):
    """Paged response for the Virtual Machine Scale Set List Skus operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The skus available for the virtual machine scale set.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetSku]
    :param next_link: URI of the next page of skus; pass it to ListNext() to continue
     paging.
    :type next_link: str
    """

    # msrest validation: 'value' must be present on serialization.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetSku]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetSku"], next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetListSkusResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetListWithLinkResult(msrest.serialization.Model):
    """Paged response for listing virtual machine scale sets (with-link variant).

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The virtual machine scale sets in this page.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSet]
    :param next_link: URI of the next page of scale sets; pass it to ListNext() to
     continue paging.
    :type next_link: str
    """

    # msrest validation: 'value' must be present on serialization.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"], next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetListWithLinkResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetManagedDiskParameters(msrest.serialization.Model):
    """Parameters of a scale set managed disk.

    :param storage_account_type: The storage account type for the managed disk. The managed
     OS disk storage account type can only be set when the scale set is created. NOTE:
     UltraSSD_LRS can only be used with data disks, not with the OS disk. Possible values
     include: "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS".
    :type storage_account_type: str or ~azure.mgmt.compute.v2019_03_01.models.StorageAccountTypes
    """

    _attribute_map = {
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(self, *, storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None, **kwargs):
        super(VirtualMachineScaleSetManagedDiskParameters, self).__init__(**kwargs)
        self.storage_account_type = storage_account_type
class VirtualMachineScaleSetNetworkConfiguration(SubResource):
    """A network configuration within a virtual machine scale set network profile.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource Id.
    :type id: str
    :param name: Required. The network configuration name.
    :type name: str
    :param primary: Whether this is the primary network interface when the virtual machine
     has more than one network interface.
    :type primary: bool
    :param enable_accelerated_networking: Whether the network interface is accelerated
     networking-enabled.
    :type enable_accelerated_networking: bool
    :param network_security_group: The network security group.
    :type network_security_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param dns_settings: The dns settings to be applied on the network interfaces.
    :type dns_settings:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
    :param ip_configurations: The IP configurations of the network interface.
    :type ip_configurations:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetIPConfiguration]
    :param enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
    :type enable_ip_forwarding: bool
    """

    # msrest validation: 'name' must be present on serialization.
    _validation = {
        'name': {'required': True},
    }

    # 'properties.*' keys flatten these attributes into the nested 'properties' JSON object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        **kwargs
    ):
        # 'id' is handled by the SubResource base class.
        super(VirtualMachineScaleSetNetworkConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.enable_ip_forwarding = enable_ip_forwarding
        self.ip_configurations = ip_configurations
        self.dns_settings = dns_settings
        self.network_security_group = network_security_group
        self.enable_accelerated_networking = enable_accelerated_networking
        self.primary = primary
class VirtualMachineScaleSetNetworkConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings of a virtual machine scale set network configuration.

    :param dns_servers: List of DNS servers IP addresses.
    :type dns_servers: list[str]
    """

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(self, *, dns_servers: Optional[List[str]] = None, **kwargs):
        super(VirtualMachineScaleSetNetworkConfigurationDnsSettings, self).__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineScaleSetNetworkProfile(msrest.serialization.Model):
    """Network profile of a virtual machine scale set.

    :param health_probe: Reference to a load balancer probe used to determine the health of
     an instance in the scale set. The reference is of the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
    :type health_probe: ~azure.mgmt.compute.v2019_03_01.models.ApiEntityReference
    :param network_interface_configurations: The list of network configurations.
    :type network_interface_configurations:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
    """

    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetNetworkProfile, self).__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
        self.health_probe = health_probe
class VirtualMachineScaleSetOSDisk(msrest.serialization.Model):
    """Operating system disk of a virtual machine scale set.

    All required parameters must be populated in order to send to Azure.

    :param name: The disk name.
    :type name: str
    :param caching: The caching requirements. Possible values are: **None**, **ReadOnly**,
     **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**.
     Possible values include: "None", "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param write_accelerator_enabled: Whether writeAccelerator should be enabled or disabled
     on the disk.
    :type write_accelerator_enabled: bool
    :param create_option: Required. How the virtual machines in the scale set should be
     created. The only allowed value is **FromImage**, used when an image creates the virtual
     machine: for a platform image also supply ``image_reference``; for a marketplace image
     also supply the plan element. Possible values include: "FromImage", "Empty", "Attach".
    :type create_option: str or ~azure.mgmt.compute.v2019_03_01.models.DiskCreateOptionTypes
    :param diff_disk_settings: Ephemeral disk settings for the operating system disk used by
     the virtual machine scale set.
    :type diff_disk_settings: ~azure.mgmt.compute.v2019_03_01.models.DiffDiskSettings
    :param disk_size_gb: Size of the operating system disk in gigabytes; overrides the size
     of the disk in the virtual machine image. Cannot be larger than 1023 GB.
    :type disk_size_gb: int
    :param os_type: The type of OS included in the disk when creating a VM from a user-image
     or specialized VHD. Possible values are: **Windows**, **Linux**. Possible values
     include: "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2019_03_01.models.OperatingSystemTypes
    :param image: Information about the unmanaged user image to base the scale set on.
    :type image: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param vhd_containers: Container urls used to store operating system disks for the scale
     set.
    :type vhd_containers: list[str]
    :param managed_disk: The managed disk parameters.
    :type managed_disk:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    """

    # msrest validation: 'create_option' must be present on serialization.
    _validation = {
        'create_option': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetOSDisk, self).__init__(**kwargs)
        self.create_option = create_option
        self.name = name
        self.managed_disk = managed_disk
        self.vhd_containers = vhd_containers
        self.image = image
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.diff_disk_settings = diff_disk_settings
        self.write_accelerator_enabled = write_accelerator_enabled
        self.caching = caching
class VirtualMachineScaleSetOSProfile(msrest.serialization.Model):
    """OS profile of a virtual machine scale set.

    :param computer_name_prefix: Computer name prefix for all of the virtual machines in the
     scale set. Computer name prefixes must be 1 to 15 characters long.
    :type computer_name_prefix: str
    :param admin_username: Name of the administrator account. Windows-only restriction:
     cannot end in ".". Disallowed values: "administrator", "admin", "user", "user1",
     "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
     "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root",
     "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4",
     "user5". Minimum length (Linux): 1 character. Maximum length: 64 characters on Linux,
     20 on Windows. See the Azure documentation on using root privileges and on selecting
     user names for Linux virtual machines.
    :type admin_username: str
    :param admin_password: Password of the administrator account. Minimum length: 8
     characters on Windows, 6 on Linux. Maximum length: 123 characters on Windows, 72 on
     Linux. Complexity requirements: 3 out of 4 of lower characters, upper characters, a
     digit, and a special character must be fulfilled. Disallowed values: "abc@123",
     "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!",
     "Password1", "Password22", "iloveyou!". See the Azure documentation on resetting the
     Remote Desktop service password (Windows) or using the VMAccess extension to reset the
     root password (Linux).
    :type admin_password: str
    :param custom_data: A base-64 encoded string of custom data. The decoded binary array is
     saved as a file on the Virtual Machine; its maximum length is 65535 bytes. For using
     cloud-init for your VM, see the Azure documentation on customizing a Linux VM during
     creation with cloud-init.
    :type custom_data: str
    :param windows_configuration: Windows operating system settings on the virtual machine.
    :type windows_configuration: ~azure.mgmt.compute.v2019_03_01.models.WindowsConfiguration
    :param linux_configuration: Linux operating system settings on the virtual machine. See
     the Azure documentation for the lists of endorsed and non-endorsed Linux distributions.
    :type linux_configuration: ~azure.mgmt.compute.v2019_03_01.models.LinuxConfiguration
    :param secrets: Certificates that should be installed onto the virtual machines in the
     scale set.
    :type secrets: list[~azure.mgmt.compute.v2019_03_01.models.VaultSecretGroup]
    """

    _attribute_map = {
        'computer_name_prefix': {'key': 'computerNamePrefix', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }

    def __init__(
        self,
        *,
        computer_name_prefix: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetOSProfile, self).__init__(**kwargs)
        self.secrets = secrets
        self.linux_configuration = linux_configuration
        self.windows_configuration = windows_configuration
        self.custom_data = custom_data
        self.admin_password = admin_password
        self.admin_username = admin_username
        self.computer_name_prefix = computer_name_prefix
class VirtualMachineScaleSetPublicIPAddressConfiguration(msrest.serialization.Model):
    """PublicIPAddress configuration of a scale set IP configuration.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The publicIP address configuration name.
    :type name: str
    :param idle_timeout_in_minutes: The idle timeout of the public IP address.
    :type idle_timeout_in_minutes: int
    :param dns_settings: The dns settings to be applied on the publicIP addresses .
    :type dns_settings:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
    :param ip_tags: The list of IP tags associated with the public IP address.
    :type ip_tags: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetIpTag]
    :param public_ip_prefix: The PublicIPPrefix from which to allocate publicIP addresses.
    :type public_ip_prefix: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    """

    # msrest validation: 'name' must be present on serialization.
    _validation = {
        'name': {'required': True},
    }

    # 'properties.*' keys flatten these attributes into the nested 'properties' JSON object.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        name: str,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        ip_tags: Optional[List["VirtualMachineScaleSetIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.public_ip_prefix = public_ip_prefix
        self.ip_tags = ip_tags
        self.dns_settings = dns_settings
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
class VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings of a scale set publicIP address configuration.

    All required parameters must be populated in order to send to Azure.

    :param domain_name_label: Required. The Domain name label. The concatenation of the
     domain name label and the vm index form the domain name labels of the PublicIPAddress
     resources that will be created.
    :type domain_name_label: str
    """

    # msrest validation: 'domain_name_label' must be present on serialization.
    _validation = {
        'domain_name_label': {'required': True},
    }

    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }

    def __init__(self, *, domain_name_label: str, **kwargs):
        super(VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings, self).__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineScaleSetReimageParameters(VirtualMachineReimageParameters):
    """Reimage parameters for a Virtual Machine Scale Set VM.

    :param temp_disk: Whether to reimage the temp disk. Default value: false. Note: this
     temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
    :type temp_disk: bool
    :param instance_ids: The virtual machine scale set instance ids. When omitted, the
     operation is performed on all virtual machines in the scale set.
    :type instance_ids: list[str]
    """

    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(self, *, temp_disk: Optional[bool] = None, instance_ids: Optional[List[str]] = None, **kwargs):
        # 'temp_disk' is stored by the VirtualMachineReimageParameters base class.
        super(VirtualMachineScaleSetReimageParameters, self).__init__(temp_disk=temp_disk, **kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetSku(msrest.serialization.Model):
    """An available virtual machine scale set sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar resource_type: The type of resource the sku applies to.
    :vartype resource_type: str
    :ivar sku: The Sku.
    :vartype sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :ivar capacity: The number of virtual machines in the scale set.
    :vartype capacity: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetSkuCapacity
    """

    # All fields are server-populated; msrest drops readonly fields on serialization.
    _validation = {
        'resource_type': {'readonly': True},
        'sku': {'readonly': True},
        'capacity': {'readonly': True},
    }

    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'capacity': {'key': 'capacity', 'type': 'VirtualMachineScaleSetSkuCapacity'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetSku, self).__init__(**kwargs)
        # Read-only attributes start out empty and are filled in by deserialization.
        self.capacity = None
        self.sku = None
        self.resource_type = None
class VirtualMachineScaleSetSkuCapacity(msrest.serialization.Model):
    """Scaling information of a sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar minimum: The minimum capacity.
    :vartype minimum: long
    :ivar maximum: The maximum capacity that can be set.
    :vartype maximum: long
    :ivar default_capacity: The default capacity.
    :vartype default_capacity: long
    :ivar scale_type: The scale type applicable to the sku. Possible values include:
     "Automatic", "None".
    :vartype scale_type: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetSkuScaleType
    """

    # All fields are server-populated; msrest drops readonly fields on serialization.
    _validation = {
        'minimum': {'readonly': True},
        'maximum': {'readonly': True},
        'default_capacity': {'readonly': True},
        'scale_type': {'readonly': True},
    }

    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'long'},
        'maximum': {'key': 'maximum', 'type': 'long'},
        'default_capacity': {'key': 'defaultCapacity', 'type': 'long'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetSkuCapacity, self).__init__(**kwargs)
        # Read-only attributes start out empty and are filled in by deserialization.
        self.scale_type = None
        self.default_capacity = None
        self.maximum = None
        self.minimum = None
class VirtualMachineScaleSetStorageProfile(msrest.serialization.Model):
    """Storage profile of a virtual machine scale set.

    :param image_reference: Information about the image to use: a platform image,
     marketplace image, or virtual machine image. This element is required when you want to
     use one of those images, but is not used in other creation operations.
    :type image_reference: ~azure.mgmt.compute.v2019_03_01.models.ImageReference
    :param os_disk: Information about the operating system disk used by the virtual
     machines in the scale set. See the Azure documentation about disks and VHDs for Azure
     virtual machines.
    :type os_disk: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetOSDisk
    :param data_disks: Parameters used to add data disks to the virtual machines in the
     scale set. See the Azure documentation about disks and VHDs for Azure virtual machines.
    :type data_disks: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetDataDisk]
    """

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetStorageProfile, self).__init__(**kwargs)
        self.data_disks = data_disks
        self.os_disk = os_disk
        self.image_reference = image_reference
class VirtualMachineScaleSetUpdate(UpdateResource):
"""Describes a Virtual Machine Scale Set.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The virtual machine scale set sku.
:type sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
:param plan: The purchase plan when deploying a virtual machine scale set from VM Marketplace
images.
:type plan: ~azure.mgmt.compute.v2019_03_01.models.Plan
:param identity: The identity of the virtual machine scale set, if configured.
:type identity: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetIdentity
:param upgrade_policy: The upgrade policy.
:type upgrade_policy: ~azure.mgmt.compute.v2019_03_01.models.UpgradePolicy
:param automatic_repairs_policy: Policy for automatic repairs.
:type automatic_repairs_policy: ~azure.mgmt.compute.v2019_03_01.models.AutomaticRepairsPolicy
:param virtual_machine_profile: The virtual machine profile.
:type virtual_machine_profile:
~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateVMProfile
:param overprovision: Specifies whether the Virtual Machine Scale Set should be
overprovisioned.
:type overprovision: bool
:param do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled, extensions
are launched only on the requested number of VMs which are finally kept. This property will
hence ensure that the extensions do not run on the extra overprovisioned VMs.
:type do_not_run_extensions_on_overprovisioned_v_ms: bool
:param single_placement_group: When true this limits the scale set to a single placement group,
of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to
false. However, if singlePlacementGroup is false, it may not be modified to true.
:type single_placement_group: bool
:param additional_capabilities: Specifies additional capabilities enabled or disabled on the
Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines
have the capability to support attaching managed data disks with UltraSSD_LRS storage account
type.
:type additional_capabilities: ~azure.mgmt.compute.v2019_03_01.models.AdditionalCapabilities
:param scale_in_policy: Specifies the scale-in policy that decides which virtual machines are
chosen for removal when a Virtual Machine Scale Set is scaled-in.
:type scale_in_policy: ~azure.mgmt.compute.v2019_03_01.models.ScaleInPolicy
:param proximity_placement_group: Specifies information about the proximity placement group
that the virtual machine scale set should be assigned to. :code:`<br>`:code:`<br>`Minimum api-
version: 2018-04-01.
:type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'plan': {'key': 'plan', 'type': 'Plan'},
'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetUpdateVMProfile'},
'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
}
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetUpdateVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """Build an update (PATCH) payload for a Virtual Machine Scale Set.

        Every keyword argument is optional and maps one-to-one onto the
        attributes documented in the class docstring; arguments left as
        ``None`` are presumably omitted from the serialized request body by
        the msrest layer (verify against ``msrest.serialization.Model``).
        """
        # ``tags`` belongs to the update-resource base class; forward it there.
        super(VirtualMachineScaleSetUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.single_placement_group = single_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.proximity_placement_group = proximity_placement_group
class VirtualMachineScaleSetUpdateIPConfiguration(SubResource):
    """A single IP configuration inside a scale set network-profile update.

    :param id: Resource Id.
    :type id: str
    :param name: Name of this IP configuration.
    :type name: str
    :param subnet: Subnet this configuration belongs to.
    :type subnet: ~azure.mgmt.compute.v2019_03_01.models.ApiEntityReference
    :param primary: Marks this configuration as the primary one when the
     network interface carries more than one IP configuration.
    :type primary: bool
    :param public_ip_address_configuration: Public IP address settings for
     this configuration.
    :type public_ip_address_configuration:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
    :param private_ip_address_version: Whether this ipconfiguration is IPv4 or
     IPv6 (available from Api-Version 2017-03-30 onwards; IPv4 is the
     default). Possible values include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.compute.v2019_03_01.models.IPVersion
    :param application_gateway_backend_address_pools: Application gateway
     backend address pools to join.
    :type application_gateway_backend_address_pools:
     list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param application_security_groups: References to application security
     groups.
    :type application_security_groups: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param load_balancer_backend_address_pools: Load balancer backend address
     pools to join.
    :type load_balancer_backend_address_pools:
     list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    :param load_balancer_inbound_nat_pools: Load balancer inbound NAT pools to
     join.
    :type load_balancer_inbound_nat_pools: list[~azure.mgmt.compute.v2019_03_01.models.SubResource]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetUpdatePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        # ``id`` is owned by the SubResource base class.
        super(VirtualMachineScaleSetUpdateIPConfiguration, self).__init__(id=id, **kwargs)
        # Identity and addressing.
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.private_ip_address_version = private_ip_address_version
        self.public_ip_address_configuration = public_ip_address_configuration
        # Pool / group memberships.
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetUpdateNetworkConfiguration(SubResource):
    """One network-interface configuration in a scale set network-profile update.

    :param id: Resource Id.
    :type id: str
    :param name: Name of this network configuration.
    :type name: str
    :param primary: Whether this NIC is the primary one on a virtual machine.
    :type primary: bool
    :param enable_accelerated_networking: Whether accelerated networking is
     enabled on the network interface.
    :type enable_accelerated_networking: bool
    :param network_security_group: Network security group to attach.
    :type network_security_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param dns_settings: DNS settings applied to the network interfaces.
    :type dns_settings:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
    :param ip_configurations: IP configurations of the scale set.
    :type ip_configurations:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateIPConfiguration]
    :param enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
    :type enable_ip_forwarding: bool
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetUpdateIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetUpdateIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        **kwargs
    ):
        # ``id`` is handled by the SubResource base class.
        super(VirtualMachineScaleSetUpdateNetworkConfiguration, self).__init__(id=id, **kwargs)
        # NIC-level flags.
        self.name = name
        self.primary = primary
        self.enable_ip_forwarding = enable_ip_forwarding
        self.enable_accelerated_networking = enable_accelerated_networking
        # Attached resources and settings.
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
class VirtualMachineScaleSetUpdateNetworkProfile(msrest.serialization.Model):
    """Network profile portion of a virtual machine scale set update.

    :param health_probe: Reference to a load balancer probe used to determine
     the health of an instance in the scale set. The reference takes the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
    :type health_probe: ~azure.mgmt.compute.v2019_03_01.models.ApiEntityReference
    :param network_interface_configurations: The network configurations.
    :type network_interface_configurations:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateNetworkConfiguration]
    """

    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetUpdateNetworkConfiguration]'},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetUpdateNetworkConfiguration"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdateNetworkProfile, self).__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
        self.health_probe = health_probe
class VirtualMachineScaleSetUpdateOSDisk(msrest.serialization.Model):
    """OS-disk update object for a virtual machine scale set.

    Use this shape when updating the VMSS OS disk.

    :param caching: The caching type. Possible values include: "None",
     "ReadOnly", "ReadWrite".
    :type caching: str or ~azure.mgmt.compute.v2019_03_01.models.CachingTypes
    :param write_accelerator_enabled: Whether writeAccelerator should be
     enabled or disabled on the disk.
    :type write_accelerator_enabled: bool
    :param disk_size_gb: Size of the operating system disk in gigabytes; can
     be used to overwrite the size taken from the image. Cannot exceed 1023
     GB.
    :type disk_size_gb: int
    :param image: The source user-image VirtualHardDisk. It is copied before
     being attached to the virtual machine; if a source image is provided, the
     destination VirtualHardDisk must not already exist.
    :type image: ~azure.mgmt.compute.v2019_03_01.models.VirtualHardDisk
    :param vhd_containers: Uris of the virtual hard disk containers.
    :type vhd_containers: list[str]
    :param managed_disk: The managed disk parameters.
    :type managed_disk:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    """

    _attribute_map = {
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }

    def __init__(
        self,
        *,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs)
        # Performance-related settings.
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        # Disk contents and placement.
        self.disk_size_gb = disk_size_gb
        self.image = image
        self.managed_disk = managed_disk
        self.vhd_containers = vhd_containers
class VirtualMachineScaleSetUpdateOSProfile(msrest.serialization.Model):
    """OS profile portion of a virtual machine scale set update.

    :param custom_data: A base-64 encoded string of custom data.
    :type custom_data: str
    :param windows_configuration: The Windows configuration of the OS profile.
    :type windows_configuration: ~azure.mgmt.compute.v2019_03_01.models.WindowsConfiguration
    :param linux_configuration: The Linux configuration of the OS profile.
    :type linux_configuration: ~azure.mgmt.compute.v2019_03_01.models.LinuxConfiguration
    :param secrets: Certificates to add to the VM.
    :type secrets: list[~azure.mgmt.compute.v2019_03_01.models.VaultSecretGroup]
    """

    _attribute_map = {
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }

    def __init__(
        self,
        *,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdateOSProfile, self).__init__(**kwargs)
        # At most one of the OS-specific configurations is expected to apply.
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.custom_data = custom_data
        self.secrets = secrets
class VirtualMachineScaleSetUpdatePublicIPAddressConfiguration(msrest.serialization.Model):
    """Public IP address settings for a scale set IP configuration update.

    :param name: The publicIP address configuration name.
    :type name: str
    :param idle_timeout_in_minutes: Idle timeout of the public IP address.
    :type idle_timeout_in_minutes: int
    :param dns_settings: DNS settings applied to the publicIP addresses.
    :type dns_settings:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdatePublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.dns_settings = dns_settings
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
class VirtualMachineScaleSetUpdateStorageProfile(msrest.serialization.Model):
    """Storage profile portion of a virtual machine scale set update.

    :param image_reference: The image reference.
    :type image_reference: ~azure.mgmt.compute.v2019_03_01.models.ImageReference
    :param os_disk: The OS disk.
    :type os_disk: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateOSDisk
    :param data_disks: The data disks.
    :type data_disks: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetDataDisk]
    """

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetUpdateOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetUpdateOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdateStorageProfile, self).__init__(**kwargs)
        self.data_disks = data_disks
        self.os_disk = os_disk
        self.image_reference = image_reference
class VirtualMachineScaleSetUpdateVMProfile(msrest.serialization.Model):
    """Virtual machine profile portion of a scale set update.

    :param os_profile: OS profile for the scale set's virtual machines.
    :type os_profile: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateOSProfile
    :param storage_profile: Storage profile for the scale set's virtual
     machines.
    :type storage_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateStorageProfile
    :param network_profile: Network profile for the scale set's virtual
     machines.
    :type network_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetUpdateNetworkProfile
    :param diagnostics_profile: Diagnostics profile for the scale set's
     virtual machines.
    :type diagnostics_profile: ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param extension_profile: Extension profile for the scale set's virtual
     machines.
    :type extension_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtensionProfile
    :param license_type: The license type, for the bring-your-own-license
     scenario.
    :type license_type: str
    :param billing_profile: Billing-related details of a low priority VMSS.
     :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
    :type billing_profile: ~azure.mgmt.compute.v2019_03_01.models.BillingProfile
    :param scheduled_events_profile: Scheduled Event related configurations.
    :type scheduled_events_profile: ~azure.mgmt.compute.v2019_03_01.models.ScheduledEventsProfile
    """

    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetUpdateOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetUpdateStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetUpdateNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
    }

    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetUpdateOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetUpdateStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetUpdateNetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetUpdateVMProfile, self).__init__(**kwargs)
        # Sub-profile objects.
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.scheduled_events_profile = scheduled_events_profile
        # Scalar / billing settings.
        self.license_type = license_type
        self.billing_profile = billing_profile
class VirtualMachineScaleSetVM(Resource):
    """Describes a virtual machine scale set virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar instance_id: The virtual machine instance ID.
    :vartype instance_id: str
    :ivar sku: The virtual machine SKU.
    :vartype sku: ~azure.mgmt.compute.v2019_03_01.models.Sku
    :param plan: Specifies information about the marketplace image used to create the virtual
     machine. This element is only used for marketplace images. Before you can use a marketplace
     image from an API, you must enable the image for programmatic use. In the Azure portal, find
     the marketplace image that you want to use and then click **Want to deploy programmatically,
     Get Started ->**. Enter any required information and then click **Save**.
    :type plan: ~azure.mgmt.compute.v2019_03_01.models.Plan
    :ivar resources: The virtual machine child extension resources.
    :vartype resources: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtension]
    :ivar zones: The virtual machine zones.
    :vartype zones: list[str]
    :ivar latest_model_applied: Specifies whether the latest model has been applied to the virtual
     machine.
    :vartype latest_model_applied: bool
    :ivar vm_id: Azure VM unique ID.
    :vartype vm_id: str
    :ivar instance_view: The virtual machine instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVMInstanceView
    :param hardware_profile: Specifies the hardware settings for the virtual machine.
    :type hardware_profile: ~azure.mgmt.compute.v2019_03_01.models.HardwareProfile
    :param storage_profile: Specifies the storage settings for the virtual machine disks.
    :type storage_profile: ~azure.mgmt.compute.v2019_03_01.models.StorageProfile
    :param additional_capabilities: Specifies additional capabilities enabled or disabled on the
     virtual machine in the scale set. For instance: whether the virtual machine has the capability
     to support attaching managed data disks with UltraSSD_LRS storage account type.
    :type additional_capabilities: ~azure.mgmt.compute.v2019_03_01.models.AdditionalCapabilities
    :param os_profile: Specifies the operating system settings for the virtual machine.
    :type os_profile: ~azure.mgmt.compute.v2019_03_01.models.OSProfile
    :param network_profile: Specifies the network interfaces of the virtual machine.
    :type network_profile: ~azure.mgmt.compute.v2019_03_01.models.NetworkProfile
    :param network_profile_configuration: Specifies the network profile configuration of the
     virtual machine.
    :type network_profile_configuration:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVMNetworkProfileConfiguration
    :param diagnostics_profile: Specifies the boot diagnostic settings state.
     :code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
    :type diagnostics_profile: ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param availability_set: Specifies information about the availability set that the virtual
     machine should be assigned to. Virtual machines specified in the same availability set are
     allocated to different nodes to maximize availability. For more information about availability
     sets, see `Manage the availability of virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-
     availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_. :code:`<br>`:code:`<br>`
     For more information on Azure planned maintenance, see `Planned maintenance for virtual
     machines in Azure <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-
     planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_
     :code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation
     time. An existing VM cannot be added to an availability set.
    :type availability_set: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :param license_type: Specifies that the image or disk that is being used was licensed on-
     premises. This element is only used for images that contain the Windows Server operating
     system. :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` Windows_Client
     :code:`<br>`:code:`<br>` Windows_Server :code:`<br>`:code:`<br>` If this element is included in
     a request for an update, the value must match the initial value. This value cannot be updated.
     :code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-
     licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_ :code:`<br>`:code:`<br>`
     Minimum api-version: 2015-06-15.
    :type license_type: str
    :ivar model_definition_applied: Specifies whether the model applied to the virtual machine is
     the model of the virtual machine scale set or the customized model for the virtual machine.
    :vartype model_definition_applied: str
    :param protection_policy: Specifies the protection policy of the virtual machine.
    :type protection_policy:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVMProtectionPolicy
    """

    # Attributes marked ``readonly`` here are server-populated; the keys below
    # must stay in sync with the ``None`` initializations in ``__init__``.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'instance_id': {'readonly': True},
        'sku': {'readonly': True},
        'resources': {'readonly': True},
        'zones': {'readonly': True},
        'latest_model_applied': {'readonly': True},
        'vm_id': {'readonly': True},
        'instance_view': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'model_definition_applied': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON paths; entries prefixed
    # with ``properties.`` are nested under the resource's properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineScaleSetVMInstanceView'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'network_profile_configuration': {'key': 'properties.networkProfileConfiguration', 'type': 'VirtualMachineScaleSetVMNetworkProfileConfiguration'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'model_definition_applied': {'key': 'properties.modelDefinitionApplied', 'type': 'str'},
        'protection_policy': {'key': 'properties.protectionPolicy', 'type': 'VirtualMachineScaleSetVMProtectionPolicy'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        network_profile_configuration: Optional["VirtualMachineScaleSetVMNetworkProfileConfiguration"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        protection_policy: Optional["VirtualMachineScaleSetVMProtectionPolicy"] = None,
        **kwargs
    ):
        # ``location`` (required) and ``tags`` are handled by the Resource base.
        super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags, **kwargs)
        # Server-populated (readonly) attributes start as None and are filled
        # in on deserialization of a response.
        self.instance_id = None
        self.sku = None
        self.plan = plan
        self.resources = None
        self.zones = None
        self.latest_model_applied = None
        self.vm_id = None
        self.instance_view = None
        # Caller-supplied configuration.
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.network_profile_configuration = network_profile_configuration
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.provisioning_state = None
        self.license_type = license_type
        self.model_definition_applied = None
        self.protection_policy = protection_policy
class VirtualMachineScaleSetVMExtensionsSummary(msrest.serialization.Model):
    """Per-extension status rollup for the virtual machines of a scale set.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The extension name.
    :vartype name: str
    :ivar statuses_summary: The extensions information.
    :vartype statuses_summary:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineStatusCodeCount]
    """

    _validation = {
        'name': {'readonly': True},
        'statuses_summary': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }

    def __init__(self, **kwargs):
        # Both attributes are read-only and filled in by the service on
        # deserialization, so they start out as ``None``.
        super(VirtualMachineScaleSetVMExtensionsSummary, self).__init__(**kwargs)
        self.name = None
        self.statuses_summary = None
class VirtualMachineScaleSetVMInstanceIDs(msrest.serialization.Model):
    """An optional list of VM instance IDs from the scale set.

    :param instance_ids: The virtual machine scale set instance ids. Omitting
     them causes the operation to be performed on every virtual machine in the
     scale set.
    :type instance_ids: list[str]
    """

    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(self, *, instance_ids: Optional[List[str]] = None, **kwargs):
        super(VirtualMachineScaleSetVMInstanceIDs, self).__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceRequiredIDs(msrest.serialization.Model):
    """A mandatory list of VM instance IDs from the scale set.

    All required parameters must be populated in order to send to Azure.

    :param instance_ids: Required. The virtual machine scale set instance ids.
    :type instance_ids: list[str]
    """

    _validation = {
        'instance_ids': {'required': True},
    }

    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(self, *, instance_ids: List[str], **kwargs):
        # Unlike VirtualMachineScaleSetVMInstanceIDs, the ids are required here.
        super(VirtualMachineScaleSetVMInstanceRequiredIDs, self).__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceView(msrest.serialization.Model):
"""The instance view of a virtual machine scale set VM.
Variables are only populated by the server, and will be ignored when sending a request.
:param platform_update_domain: The Update Domain count.
:type platform_update_domain: int
:param platform_fault_domain: The Fault Domain count.
:type platform_fault_domain: int
:param rdp_thumb_print: The Remote desktop certificate thumbprint.
:type rdp_thumb_print: str
:param vm_agent: The VM Agent running on the virtual machine.
:type vm_agent: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineAgentInstanceView
:param maintenance_redeploy_status: The Maintenance Operation status on the virtual machine.
:type maintenance_redeploy_status:
~azure.mgmt.compute.v2019_03_01.models.MaintenanceRedeployStatus
:param disks: The disks information.
:type disks: list[~azure.mgmt.compute.v2019_03_01.models.DiskInstanceView]
:param extensions: The extensions information.
:type extensions:
list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineExtensionInstanceView]
:ivar vm_health: The health status for the VM.
:vartype vm_health: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineHealthStatus
:param boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
Console Output and Screenshot to diagnose VM status. :code:`<br>`:code:`<br>` You can easily
view the output of your console log. :code:`<br>`:code:`<br>` Azure also enables you to see a
screenshot of the VM from the hypervisor.
:type boot_diagnostics: ~azure.mgmt.compute.v2019_03_01.models.BootDiagnosticsInstanceView
:param statuses: The resource status information.
:type statuses: list[~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus]
:param placement_group_id: The placement group in which the VM is running. If the VM is
deallocated it will not have a placementGroupId.
:type placement_group_id: str
"""
_validation = {
'vm_health': {'readonly': True},
}
_attribute_map = {
'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
}
    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        placement_group_id: Optional[str] = None,
        **kwargs
    ):
        """Initialize the instance view; see the class docstring for parameter semantics.

        ``vm_health`` is read-only per ``_validation`` (server-populated), so it is not
        accepted as a constructor argument and is initialized to ``None`` below.
        """
        super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        # Read-only field: populated by the server on deserialization, never sent.
        self.vm_health = None
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
        self.placement_group_id = placement_group_id
class VirtualMachineScaleSetVMListResult(msrest.serialization.Model):
    """Response for the List Virtual Machine Scale Set VMs operation.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The virtual machine scale set VMs in this page of results.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetVM]
    :param next_link: URI of the next page of VMSS VMs; call ListNext() with it to
     continue paging.
    :type next_link: str
    """

    # ``value`` must always be supplied when sending a request to the service.
    _validation = {
        'value': {'required': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVM]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetVM"], next_link: Optional[str] = None, **kwargs):
        super(VirtualMachineScaleSetVMListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetVMNetworkProfileConfiguration(msrest.serialization.Model):
    """Network profile of a single virtual machine scale set VM.

    :param network_interface_configurations: The list of network configurations.
    :type network_interface_configurations:
     list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'network_interface_configurations': {
            'key': 'networkInterfaceConfigurations',
            'type': '[VirtualMachineScaleSetNetworkConfiguration]',
        },
    }

    def __init__(
        self,
        *,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetVMNetworkProfileConfiguration, self).__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetVMProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set virtual machine profile.

    :param os_profile: Operating system settings for the virtual machines in the scale set.
    :type os_profile: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetOSProfile
    :param storage_profile: Storage settings for the virtual machine disks.
    :type storage_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetStorageProfile
    :param network_profile: Properties of the network interfaces of the virtual machines in
     the scale set.
    :type network_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkProfile
    :param diagnostics_profile: Boot diagnostic settings state. Minimum api-version:
     2015-06-15.
    :type diagnostics_profile: ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param extension_profile: Settings for extensions installed on virtual machines in the
     scale set.
    :type extension_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtensionProfile
    :param license_type: Indicates that the image or disk being used was licensed
     on-premises. Only used for images containing the Windows Server operating system.
     Possible values: Windows_Client, Windows_Server. If included in an update request the
     value must match the initial value; it cannot be updated. See `Azure Hybrid Use Benefit
     for Windows Server
     <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.
     Minimum api-version: 2015-06-15.
    :type license_type: str
    :param priority: Priority for the virtual machines in the scale set. Minimum
     api-version: 2017-10-30-preview. Possible values include: "Regular", "Low", "Spot".
    :type priority: str or ~azure.mgmt.compute.v2019_03_01.models.VirtualMachinePriorityTypes
    :param eviction_policy: Eviction policy for virtual machines in an Azure Spot scale set.
     Minimum api-version: 2017-10-30-preview. Possible values include: "Deallocate",
     "Delete".
    :type eviction_policy: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineEvictionPolicyTypes
    :param billing_profile: Billing related details of an Azure Spot VMSS. Minimum
     api-version: 2019-03-01.
    :type billing_profile: ~azure.mgmt.compute.v2019_03_01.models.BillingProfile
    :param scheduled_events_profile: Scheduled Event related configurations.
    :type scheduled_events_profile: ~azure.mgmt.compute.v2019_03_01.models.ScheduledEventsProfile
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'str'},
        'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
    }

    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetNetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
        # Plain attribute copies; serialization is driven entirely by _attribute_map.
        self.scheduled_events_profile = scheduled_events_profile
        self.billing_profile = billing_profile
        self.eviction_policy = eviction_policy
        self.priority = priority
        self.license_type = license_type
        self.extension_profile = extension_profile
        self.diagnostics_profile = diagnostics_profile
        self.network_profile = network_profile
        self.storage_profile = storage_profile
        self.os_profile = os_profile
class VirtualMachineScaleSetVMProtectionPolicy(msrest.serialization.Model):
    """The protection policy of a virtual machine scale set VM.

    :param protect_from_scale_in: When true, this VM will not be considered for deletion
     during a scale-in operation.
    :type protect_from_scale_in: bool
    :param protect_from_scale_set_actions: When true, model updates or actions (including
     scale-in) initiated on the scale set will not be applied to this VM.
    :type protect_from_scale_set_actions: bool
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'protect_from_scale_in': {'key': 'protectFromScaleIn', 'type': 'bool'},
        'protect_from_scale_set_actions': {'key': 'protectFromScaleSetActions', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        protect_from_scale_in: Optional[bool] = None,
        protect_from_scale_set_actions: Optional[bool] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetVMProtectionPolicy, self).__init__(**kwargs)
        self.protect_from_scale_set_actions = protect_from_scale_set_actions
        self.protect_from_scale_in = protect_from_scale_in
class VirtualMachineScaleSetVMReimageParameters(VirtualMachineReimageParameters):
    """Describes a Virtual Machine Scale Set VM Reimage Parameters.

    :param temp_disk: Whether to reimage the temp disk (defaults to false). This parameter is
     only supported for VM/VMSS with an Ephemeral OS disk.
    :type temp_disk: bool
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        # The field itself lives on the base class; this subclass only re-declares it
        # under its own wire mapping and forwards the value upward.
        super(VirtualMachineScaleSetVMReimageParameters, self).__init__(temp_disk=temp_disk, **kwargs)
class VirtualMachineSize(msrest.serialization.Model):
    """Describes the properties of a VM size.

    :param name: The name of the virtual machine size.
    :type name: str
    :param number_of_cores: The number of cores supported by the size.
    :type number_of_cores: int
    :param os_disk_size_in_mb: The OS disk size, in MB, allowed by the size.
    :type os_disk_size_in_mb: int
    :param resource_disk_size_in_mb: The resource disk size, in MB, allowed by the size.
    :type resource_disk_size_in_mb: int
    :param memory_in_mb: The amount of memory, in MB, supported by the size.
    :type memory_in_mb: int
    :param max_data_disk_count: The maximum number of data disks that can be attached to a VM
     of this size.
    :type max_data_disk_count: int
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'number_of_cores': {'key': 'numberOfCores', 'type': 'int'},
        'os_disk_size_in_mb': {'key': 'osDiskSizeInMB', 'type': 'int'},
        'resource_disk_size_in_mb': {'key': 'resourceDiskSizeInMB', 'type': 'int'},
        'memory_in_mb': {'key': 'memoryInMB', 'type': 'int'},
        'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        number_of_cores: Optional[int] = None,
        os_disk_size_in_mb: Optional[int] = None,
        resource_disk_size_in_mb: Optional[int] = None,
        memory_in_mb: Optional[int] = None,
        max_data_disk_count: Optional[int] = None,
        **kwargs
    ):
        super(VirtualMachineSize, self).__init__(**kwargs)
        self.max_data_disk_count = max_data_disk_count
        self.memory_in_mb = memory_in_mb
        self.resource_disk_size_in_mb = resource_disk_size_in_mb
        self.os_disk_size_in_mb = os_disk_size_in_mb
        self.number_of_cores = number_of_cores
        self.name = name
class VirtualMachineSizeListResult(msrest.serialization.Model):
    """Response for the List Virtual Machine sizes operation.

    :param value: The list of virtual machine sizes.
    :type value: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineSize]
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineSize]'},
    }

    def __init__(self, *, value: Optional[List["VirtualMachineSize"]] = None, **kwargs):
        super(VirtualMachineSizeListResult, self).__init__(**kwargs)
        self.value = value
class VirtualMachineStatusCodeCount(msrest.serialization.Model):
    """A status code together with the number of instances reporting it, as seen in the scale
    set instance view status summary.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The instance view status code.
    :vartype code: str
    :ivar count: The number of instances having this particular status code.
    :vartype count: int
    """

    # Both fields are server-populated; the caller never supplies them.
    _validation = {
        'code': {'readonly': True},
        'count': {'readonly': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineStatusCodeCount, self).__init__(**kwargs)
        # Read-only fields start out unset and are filled in on deserialization.
        self.count = None
        self.code = None
class VirtualMachineUpdate(UpdateResource):
    """Describes a Virtual Machine Update.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param plan: Marketplace image plan information. Only used for marketplace images, which
     must be enabled for programmatic use in the Azure portal before deployment.
    :type plan: ~azure.mgmt.compute.v2019_03_01.models.Plan
    :param identity: The identity of the virtual machine, if configured.
    :type identity: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineIdentity
    :param zones: The virtual machine zones.
    :type zones: list[str]
    :param hardware_profile: Hardware settings for the virtual machine.
    :type hardware_profile: ~azure.mgmt.compute.v2019_03_01.models.HardwareProfile
    :param storage_profile: Storage settings for the virtual machine disks.
    :type storage_profile: ~azure.mgmt.compute.v2019_03_01.models.StorageProfile
    :param additional_capabilities: Additional capabilities enabled or disabled on the VM.
    :type additional_capabilities: ~azure.mgmt.compute.v2019_03_01.models.AdditionalCapabilities
    :param os_profile: Operating system settings for the virtual machine.
    :type os_profile: ~azure.mgmt.compute.v2019_03_01.models.OSProfile
    :param network_profile: Network interfaces of the virtual machine.
    :type network_profile: ~azure.mgmt.compute.v2019_03_01.models.NetworkProfile
    :param diagnostics_profile: Boot diagnostic settings state. Minimum api-version:
     2015-06-15.
    :type diagnostics_profile: ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param availability_set: The availability set the VM should be assigned to. A VM can only
     be added to an availability set at creation time; this property cannot exist together
     with a non-null properties.virtualMachineScaleSet reference.
    :type availability_set: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param virtual_machine_scale_set: The virtual machine scale set the VM should be assigned
     to. Only settable at creation time; this property cannot exist together with a non-null
     properties.availabilitySet reference. Minimum api-version: 2019-03-01.
    :type virtual_machine_scale_set: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param proximity_placement_group: The proximity placement group the VM should be assigned
     to. Minimum api-version: 2018-04-01.
    :type proximity_placement_group: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :param priority: Priority for the virtual machine. Minimum api-version: 2019-03-01.
     Possible values include: "Regular", "Low", "Spot".
    :type priority: str or ~azure.mgmt.compute.v2019_03_01.models.VirtualMachinePriorityTypes
    :param eviction_policy: Eviction policy for the Azure Spot virtual machine. Only supported
     value is 'Deallocate'. Minimum api-version: 2019-03-01. Possible values include:
     "Deallocate", "Delete".
    :type eviction_policy: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineEvictionPolicyTypes
    :param billing_profile: Billing related details of an Azure Spot virtual machine. Minimum
     api-version: 2019-03-01.
    :type billing_profile: ~azure.mgmt.compute.v2019_03_01.models.BillingProfile
    :param host: The dedicated host the virtual machine resides in. Minimum api-version:
     2018-10-01.
    :type host: ~azure.mgmt.compute.v2019_03_01.models.SubResource
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The virtual machine instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineInstanceView
    :param license_type: Indicates that the image or disk being used was licensed
     on-premises. Only used for images containing the Windows Server operating system.
     Possible values: Windows_Client, Windows_Server. If included in an update request the
     value must match the initial value; it cannot be updated. Minimum api-version:
     2015-06-15.
    :type license_type: str
    :ivar vm_id: The VM unique ID: a 128-bit identifier encoded and stored in all Azure IaaS
     VMs SMBIOS, readable via platform BIOS commands.
    :vartype vm_id: str
    """

    # Server-populated fields may not be supplied by the caller.
    _validation = {
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'vm_id': {'readonly': True},
    }

    # Wire-format mapping: attribute name -> JSON key (note the 'properties.' nesting)
    # and serialization type.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'priority': {'key': 'properties.priority', 'type': 'str'},
        'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
        'host': {'key': 'properties.host', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        **kwargs
    ):
        # ``tags`` is handled by the UpdateResource base class.
        super(VirtualMachineUpdate, self).__init__(tags=tags, **kwargs)
        # Caller-supplied settings; serialization is driven by _attribute_map.
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.license_type = license_type
        # Read-only, server-populated attributes start out unset.
        self.provisioning_state = None
        self.instance_view = None
        self.vm_id = None
class VMScaleSetConvertToSinglePlacementGroupInput(msrest.serialization.Model):
    """Input for the ConvertToSinglePlacementGroup operation on a VM scale set.

    :param active_placement_group_id: Id of the placement group in which future virtual
     machine instances should be placed. Query it via the Virtual Machine Scale Set VMs - Get
     API. If omitted, the platform chooses the group with the most virtual machine instances.
    :type active_placement_group_id: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'active_placement_group_id': {'key': 'activePlacementGroupId', 'type': 'str'},
    }

    def __init__(self, *, active_placement_group_id: Optional[str] = None, **kwargs):
        super(VMScaleSetConvertToSinglePlacementGroupInput, self).__init__(**kwargs)
        self.active_placement_group_id = active_placement_group_id
class WindowsConfiguration(msrest.serialization.Model):
    """Specifies Windows operating system settings on the virtual machine.

    :param provision_vm_agent: Whether the virtual machine agent should be provisioned on the
     VM. When omitted from the request body, it defaults to true, ensuring the VM Agent is
     installed so that extensions can be added later.
    :type provision_vm_agent: bool
    :param enable_automatic_updates: Whether Automatic Updates is enabled for the Windows VM.
     Default value is true. For scale sets this property can be updated; updates take effect
     on OS reprovisioning.
    :type enable_automatic_updates: bool
    :param time_zone: Time zone of the virtual machine, e.g. "Pacific Standard Time".
    :type time_zone: str
    :param additional_unattend_content: Additional base-64 encoded XML formatted information
     to include in the Unattend.xml file, which is used by Windows Setup.
    :type additional_unattend_content:
     list[~azure.mgmt.compute.v2019_03_01.models.AdditionalUnattendContent]
    :param win_rm: Windows Remote Management listeners; enables remote Windows PowerShell.
    :type win_rm: ~azure.mgmt.compute.v2019_03_01.models.WinRMConfiguration
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
        'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
    }

    def __init__(
        self,
        *,
        provision_vm_agent: Optional[bool] = None,
        enable_automatic_updates: Optional[bool] = None,
        time_zone: Optional[str] = None,
        additional_unattend_content: Optional[List["AdditionalUnattendContent"]] = None,
        win_rm: Optional["WinRMConfiguration"] = None,
        **kwargs
    ):
        super(WindowsConfiguration, self).__init__(**kwargs)
        self.win_rm = win_rm
        self.additional_unattend_content = additional_unattend_content
        self.time_zone = time_zone
        self.enable_automatic_updates = enable_automatic_updates
        self.provision_vm_agent = provision_vm_agent
class WinRMConfiguration(msrest.serialization.Model):
    """Describes the Windows Remote Management configuration of the VM.

    :param listeners: The list of Windows Remote Management listeners.
    :type listeners: list[~azure.mgmt.compute.v2019_03_01.models.WinRMListener]
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
    }

    def __init__(self, *, listeners: Optional[List["WinRMListener"]] = None, **kwargs):
        super(WinRMConfiguration, self).__init__(**kwargs)
        self.listeners = listeners
class WinRMListener(msrest.serialization.Model):
    """Describes the protocol and thumbprint of a Windows Remote Management listener.

    :param protocol: Protocol of the listener: **http** or **https**. Possible values
     include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.compute.v2019_03_01.models.ProtocolTypes
    :param certificate_url: URL of a certificate uploaded to Key Vault as a secret (see `Add
     a key or secret to the key vault
     <https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add>`_). The
     certificate must be the Base64 encoding of the following UTF-8 encoded JSON object:
     {"data": "<Base64-encoded-certificate>", "dataType": "pfx",
     "password": "<pfx-file-password>"}.
    :type certificate_url: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        protocol: Optional[Union[str, "ProtocolTypes"]] = None,
        certificate_url: Optional[str] = None,
        **kwargs
    ):
        super(WinRMListener, self).__init__(**kwargs)
        self.certificate_url = certificate_url
        self.protocol = protocol
| 43.324651 | 901 | 0.678087 |
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AccessUri(msrest.serialization.Model):
    """A disk access SAS uri.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar access_sas: A SAS uri for accessing a disk.
    :vartype access_sas: str
    """

    # 'access_sas' is server-populated and therefore read-only.
    _validation = {
        'access_sas': {'readonly': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'access_sas': {'key': 'accessSAS', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AccessUri, self).__init__(**kwargs)
        # Filled in by the server on deserialization.
        self.access_sas = None
class AdditionalCapabilities(msrest.serialization.Model):
    """Enables or disables a capability on the virtual machine or virtual machine scale set.

    :param ultra_ssd_enabled: The flag that enables or disables the capability to have one or
     more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS.
    :type ultra_ssd_enabled: bool
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        ultra_ssd_enabled: Optional[bool] = None,
        **kwargs
    ):
        super(AdditionalCapabilities, self).__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
class AdditionalUnattendContent(msrest.serialization.Model):
    """Specifies additional XML formatted information that can be included in the Unattend.xml
    file, which is used by Windows Setup.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar pass_name: The pass name. Constant value: "OobeSystem".
    :vartype pass_name: str
    :ivar component_name: The component name. Constant value:
     "Microsoft-Windows-Shell-Setup".
    :vartype component_name: str
    :param setting_name: The name of the setting to which the content applies.
    :type setting_name: str or ~azure.mgmt.compute.v2019_03_01.models.SettingNames
    :param content: The content to add to the unattend.xml file.
    :type content: str
    """

    # pass_name and component_name are fixed constants of the wire format.
    _validation = {
        'pass_name': {'constant': True},
        'component_name': {'constant': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'pass_name': {'key': 'passName', 'type': 'str'},
        'component_name': {'key': 'componentName', 'type': 'str'},
        'setting_name': {'key': 'settingName', 'type': 'str'},
        'content': {'key': 'content', 'type': 'str'},
    }

    # Constant class-level values serialized for every instance.
    pass_name = "OobeSystem"
    component_name = "Microsoft-Windows-Shell-Setup"

    def __init__(
        self,
        *,
        setting_name: Optional[Union[str, "SettingNames"]] = None,
        content: Optional[str] = None,
        **kwargs
    ):
        super(AdditionalUnattendContent, self).__init__(**kwargs)
        self.setting_name = setting_name
        self.content = content
class ApiEntityReference(msrest.serialization.Model):
    """The API entity reference.

    :param id: The ARM resource id of the referenced entity.
    :type id: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(ApiEntityReference, self).__init__(**kwargs)
        self.id = id
class ApiError(msrest.serialization.Model):
    """Api error.

    :param details: The Api error details.
    :type details: list[~azure.mgmt.compute.v2019_03_01.models.ApiErrorBase]
    :param innererror: The Api inner error.
    :type innererror: ~azure.mgmt.compute.v2019_03_01.models.InnerError
    :param code: The error code.
    :type code: str
    :param target: The target of the particular error.
    :type target: str
    :param message: The error message.
    :type message: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'details': {'key': 'details', 'type': '[ApiErrorBase]'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        details: Optional[List["ApiErrorBase"]] = None,
        innererror: Optional["InnerError"] = None,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        super(ApiError, self).__init__(**kwargs)
        self.details = details
        self.innererror = innererror
        self.code = code
        self.target = target
        self.message = message
class ApiErrorBase(msrest.serialization.Model):
    """Api error base.

    :param code: The error code.
    :type code: str
    :param target: The target of the particular error.
    :type target: str
    :param message: The error message.
    :type message: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        super(ApiErrorBase, self).__init__(**kwargs)
        self.code = code
        self.target = target
        self.message = message
class AutomaticOSUpgradePolicy(msrest.serialization.Model):
    """The configuration parameters used for performing automatic OS upgrade.

    :param enable_automatic_os_upgrade: Whether OS upgrades should automatically be applied
     to scale set instances in a rolling fashion when a newer version of the OS image becomes
     available.
    :type enable_automatic_os_upgrade: bool
    :param disable_automatic_rollback: Whether OS image rollback feature should be disabled.
    :type disable_automatic_rollback: bool
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'},
        'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        enable_automatic_os_upgrade: Optional[bool] = None,
        disable_automatic_rollback: Optional[bool] = None,
        **kwargs
    ):
        super(AutomaticOSUpgradePolicy, self).__init__(**kwargs)
        self.enable_automatic_os_upgrade = enable_automatic_os_upgrade
        self.disable_automatic_rollback = disable_automatic_rollback
class AutomaticOSUpgradeProperties(msrest.serialization.Model):
    """Describes automatic OS upgrade properties on the image.

    All required parameters must be populated in order to send to Azure.

    :param automatic_os_upgrade_supported: Required. Whether automatic OS upgrade is
     supported on the image.
    :type automatic_os_upgrade_supported: bool
    """

    # The single field is mandatory when sending a request.
    _validation = {
        'automatic_os_upgrade_supported': {'required': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'automatic_os_upgrade_supported': {'key': 'automaticOSUpgradeSupported', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        automatic_os_upgrade_supported: bool,
        **kwargs
    ):
        super(AutomaticOSUpgradeProperties, self).__init__(**kwargs)
        self.automatic_os_upgrade_supported = automatic_os_upgrade_supported
class AutomaticRepairsPolicy(msrest.serialization.Model):
    """Specifies the configuration parameters for automatic repairs on the virtual machine
    scale set.

    :param enabled: Whether automatic repairs should be enabled on the virtual machine scale
     set.
    :type enabled: bool
    :param grace_period: The amount of time for which automatic repairs are suspended due to
     a state change on the VM. Presumably an ISO 8601 duration string (e.g. "PT30M") —
     TODO(review): confirm against the service API documentation.
    :type grace_period: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'grace_period': {'key': 'gracePeriod', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        grace_period: Optional[str] = None,
        **kwargs
    ):
        super(AutomaticRepairsPolicy, self).__init__(**kwargs)
        self.enabled = enabled
        self.grace_period = grace_period
class Resource(msrest.serialization.Model):
    """The Resource model definition.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    # id/name/type are server-populated; location must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        # Read-only fields start out unset and are filled in on deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
class AvailabilitySet(Resource):
    """Serialization model for an availability set resource.

    Extends :class:`Resource` (required ``location``, optional ``tags``)
    with sku, fault/update domain counts, the member virtual machines and a
    proximity placement group reference.  ``statuses`` is read-only and
    starts out as ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'statuses': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; populated by the service.
        self.statuses = None
class AvailabilitySetListResult(msrest.serialization.Model):
    """Paged list of :class:`AvailabilitySet` items.

    :param value: Required page of availability sets.
    :type value: list[AvailabilitySet]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailabilitySet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["AvailabilitySet"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class UpdateResource(msrest.serialization.Model):
    """Base serialization model for PATCH/update payloads: tags only.

    :param tags: Optional resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tags = tags
class AvailabilitySetUpdate(UpdateResource):
    """Update payload for an availability set.

    Mirrors :class:`AvailabilitySet` properties on top of
    :class:`UpdateResource` (tags).  ``statuses`` is read-only and starts
    out as ``None``.
    """

    _validation = {
        'statuses': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        super().__init__(tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; populated by the service.
        self.statuses = None
class BillingProfile(msrest.serialization.Model):
    """Serialization model carrying a single optional ``max_price`` value.

    :param max_price: Serialized as ``maxPrice`` (float).
    :type max_price: float
    """

    _attribute_map = {
        'max_price': {'key': 'maxPrice', 'type': 'float'},
    }

    def __init__(
        self,
        *,
        max_price: Optional[float] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.max_price = max_price
class BootDiagnostics(msrest.serialization.Model):
    """Serialization model for boot-diagnostics settings.

    :param enabled: Serialized as ``enabled``.
    :type enabled: bool
    :param storage_uri: Serialized as ``storageUri``.
    :type storage_uri: str
    """

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'storage_uri': {'key': 'storageUri', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        storage_uri: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.enabled = enabled
        self.storage_uri = storage_uri
class BootDiagnosticsInstanceView(msrest.serialization.Model):
    """Read-only instance view of boot diagnostics.

    Every attribute is marked read-only in ``_validation``; the constructor
    takes no positional data and initializes them all to ``None`` — they are
    populated on deserialization.
    """

    _validation = {
        'console_screenshot_blob_uri': {'readonly': True},
        'serial_console_log_blob_uri': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
        'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.console_screenshot_blob_uri = None
        self.serial_console_log_blob_uri = None
        self.status = None
class ComputeOperationListResult(msrest.serialization.Model):
    """Read-only list of :class:`ComputeOperationValue` items.

    ``value`` is read-only; it starts out as ``None`` and is populated on
    deserialization.
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ComputeOperationValue]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = None
class ComputeOperationValue(msrest.serialization.Model):
    """Read-only description of a single API operation.

    All attributes are read-only and initialized to ``None``; the
    ``display.*`` keys in ``_attribute_map`` flatten a nested ``display``
    object into top-level attributes on deserialization.
    """

    _validation = {
        'origin': {'readonly': True},
        'name': {'readonly': True},
        'operation': {'readonly': True},
        'resource': {'readonly': True},
        'description': {'readonly': True},
        'provider': {'readonly': True},
    }

    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'operation': {'key': 'display.operation', 'type': 'str'},
        'resource': {'key': 'display.resource', 'type': 'str'},
        'description': {'key': 'display.description', 'type': 'str'},
        'provider': {'key': 'display.provider', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.origin = None
        self.name = None
        self.operation = None
        self.resource = None
        self.description = None
        self.provider = None
class CreationData(msrest.serialization.Model):
    """Serialization model describing how a disk/snapshot is created.

    ``create_option`` is required; the source-related fields are optional
    keywords.  ``source_unique_id`` is read-only and starts out as ``None``.
    """

    _validation = {
        'create_option': {'required': True},
        'source_unique_id': {'readonly': True},
    }

    _attribute_map = {
        'create_option': {'key': 'createOption', 'type': 'str'},
        'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
        'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
        'source_uri': {'key': 'sourceUri', 'type': 'str'},
        'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
        'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'},
        'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOption"],
        storage_account_id: Optional[str] = None,
        image_reference: Optional["ImageDiskReference"] = None,
        source_uri: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        upload_size_bytes: Optional[int] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.create_option = create_option
        self.storage_account_id = storage_account_id
        self.image_reference = image_reference
        self.source_uri = source_uri
        self.source_resource_id = source_resource_id
        # Read-only; populated by the service.
        self.source_unique_id = None
        self.upload_size_bytes = upload_size_bytes
class DataDisk(msrest.serialization.Model):
    """Serialization model for a data disk.

    ``lun`` and ``create_option`` are required; all other arguments are
    optional keywords.  ``_attribute_map`` gives the wire (camelCase) name
    for each attribute.
    """

    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
    }

    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'to_be_detached': {'key': 'toBeDetached', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        to_be_detached: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.lun = lun
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.to_be_detached = to_be_detached
class DataDiskImage(msrest.serialization.Model):
    """Read-only data-disk image: exposes only a server-populated ``lun``.

    ``lun`` starts out as ``None`` and is filled in on deserialization.
    """

    _validation = {
        'lun': {'readonly': True},
    }

    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.lun = None
class DedicatedHost(Resource):
    """Serialization model for a dedicated host resource.

    Extends :class:`Resource`; ``location`` and ``sku`` are required, and
    ``platform_fault_domain`` is validated to the range 0..2.  The
    ``host_id``, ``virtual_machines``, ``provisioning_time``,
    ``provisioning_state`` and ``instance_view`` attributes are read-only
    and start out as ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'platform_fault_domain': {'maximum': 2, 'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
    }

    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        self.license_type = license_type
        # Read-only fields: populated by the service.
        self.host_id = None
        self.virtual_machines = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
class DedicatedHostAllocatableVM(msrest.serialization.Model):
    """Serialization model pairing a VM size with an allocatable count.

    :param vm_size: Serialized as ``vmSize``.
    :type vm_size: str
    :param count: Serialized as ``count`` (float).
    :type count: float
    """

    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'count': {'key': 'count', 'type': 'float'},
    }

    def __init__(
        self,
        *,
        vm_size: Optional[str] = None,
        count: Optional[float] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.vm_size = vm_size
        self.count = count
class DedicatedHostAvailableCapacity(msrest.serialization.Model):
    """Wrapper holding a list of :class:`DedicatedHostAllocatableVM`.

    :param allocatable_v_ms: Serialized as ``allocatableVMs``.
    :type allocatable_v_ms: list[DedicatedHostAllocatableVM]
    """

    _attribute_map = {
        'allocatable_v_ms': {'key': 'allocatableVMs', 'type': '[DedicatedHostAllocatableVM]'},
    }

    def __init__(
        self,
        *,
        allocatable_v_ms: Optional[List["DedicatedHostAllocatableVM"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.allocatable_v_ms = allocatable_v_ms
class DedicatedHostGroup(Resource):
    """Serialization model for a dedicated host group resource.

    Extends :class:`Resource`; ``platform_fault_domain_count`` is validated
    to the range 1..3.  ``hosts`` is read-only and starts out as ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'platform_fault_domain_count': {'maximum': 3, 'minimum': 1},
        'hosts': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Read-only; populated by the service.
        self.hosts = None
class DedicatedHostGroupListResult(msrest.serialization.Model):
    """Paged list of :class:`DedicatedHostGroup` items.

    :param value: Required page of host groups.
    :type value: list[DedicatedHostGroup]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHostGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["DedicatedHostGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostGroupUpdate(UpdateResource):
    """Update payload for a dedicated host group.

    Same properties as :class:`DedicatedHostGroup` on top of
    :class:`UpdateResource` (tags).  ``hosts`` is read-only and starts out
    as ``None``.
    """

    _validation = {
        'platform_fault_domain_count': {'maximum': 3, 'minimum': 1},
        'hosts': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        **kwargs
    ):
        super().__init__(tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Read-only; populated by the service.
        self.hosts = None
class DedicatedHostInstanceView(msrest.serialization.Model):
    """Instance view of a dedicated host.

    ``asset_id`` is read-only and starts out as ``None``;
    ``available_capacity`` and ``statuses`` are optional keywords.
    """

    _validation = {
        'asset_id': {'readonly': True},
    }

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Read-only; populated by the service.
        self.asset_id = None
        self.available_capacity = available_capacity
        self.statuses = statuses
class DedicatedHostListResult(msrest.serialization.Model):
    """Paged list of :class:`DedicatedHost` items.

    :param value: Required page of dedicated hosts.
    :type value: list[DedicatedHost]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHost]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["DedicatedHost"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostUpdate(UpdateResource):
    """Update payload for a dedicated host.

    Mirrors the writable :class:`DedicatedHost` properties on top of
    :class:`UpdateResource` (tags).  The ``host_id``, ``virtual_machines``,
    ``provisioning_time``, ``provisioning_state`` and ``instance_view``
    attributes are read-only and start out as ``None``.
    """

    _validation = {
        'platform_fault_domain': {'maximum': 2, 'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        super().__init__(tags=tags, **kwargs)
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        self.license_type = license_type
        # Read-only fields: populated by the service.
        self.host_id = None
        self.virtual_machines = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
class DiagnosticsProfile(msrest.serialization.Model):
    """Wrapper holding an optional :class:`BootDiagnostics` value.

    :param boot_diagnostics: Serialized as ``bootDiagnostics``.
    :type boot_diagnostics: BootDiagnostics
    """

    _attribute_map = {
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnostics'},
    }

    def __init__(
        self,
        *,
        boot_diagnostics: Optional["BootDiagnostics"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.boot_diagnostics = boot_diagnostics
class DiffDiskSettings(msrest.serialization.Model):
    """Serialization model carrying a single optional ``option`` value.

    :param option: Serialized as ``option``; str or ``DiffDiskOptions``.
    :type option: str or DiffDiskOptions
    """

    _attribute_map = {
        'option': {'key': 'option', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        option: Optional[Union[str, "DiffDiskOptions"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.option = option
class Disallowed(msrest.serialization.Model):
    """Serialization model carrying an optional list of disk types.

    :param disk_types: Serialized as ``diskTypes``.
    :type disk_types: list[str]
    """

    _attribute_map = {
        'disk_types': {'key': 'diskTypes', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        disk_types: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.disk_types = disk_types
class Disk(Resource):
    """Serialization model for a managed disk resource.

    Extends :class:`Resource` (required ``location``, optional ``tags``).
    The ``managed_by``, ``time_created``, ``disk_size_bytes``,
    ``unique_id``, ``provisioning_state`` and ``disk_state`` attributes are
    read-only and start out as ``None``; everything else is an optional
    keyword argument whose wire name is given by ``_attribute_map``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'managed_by': {'readonly': True},
        'time_created': {'readonly': True},
        'disk_size_bytes': {'readonly': True},
        'unique_id': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'disk_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed_by': {'key': 'managedBy', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'DiskSku'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
        'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'int'},
        'disk_state': {'key': 'properties.diskState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["DiskSku"] = None,
        zones: Optional[List[str]] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
        creation_data: Optional["CreationData"] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.zones = zones
        self.os_type = os_type
        self.hyper_v_generation = hyper_v_generation
        self.creation_data = creation_data
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
        # Read-only fields: populated by the service.
        self.managed_by = None
        self.time_created = None
        self.disk_size_bytes = None
        self.unique_id = None
        self.provisioning_state = None
        self.disk_state = None
class DiskEncryptionSettings(msrest.serialization.Model):
    """Serialization model for disk-encryption key references.

    :param disk_encryption_key: Serialized as ``diskEncryptionKey``.
    :type disk_encryption_key: KeyVaultSecretReference
    :param key_encryption_key: Serialized as ``keyEncryptionKey``.
    :type key_encryption_key: KeyVaultKeyReference
    :param enabled: Serialized as ``enabled``.
    :type enabled: bool
    """

    _attribute_map = {
        'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultSecretReference'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultKeyReference'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        disk_encryption_key: Optional["KeyVaultSecretReference"] = None,
        key_encryption_key: Optional["KeyVaultKeyReference"] = None,
        enabled: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
        self.enabled = enabled
class DiskInstanceView(msrest.serialization.Model):
    """Instance view of a disk: name, encryption settings and statuses.

    :param name: Serialized as ``name``.
    :type name: str
    :param encryption_settings: Serialized as ``encryptionSettings``.
    :type encryption_settings: list[DiskEncryptionSettings]
    :param statuses: Serialized as ``statuses``.
    :type statuses: list[InstanceViewStatus]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': '[DiskEncryptionSettings]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        encryption_settings: Optional[List["DiskEncryptionSettings"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.encryption_settings = encryption_settings
        self.statuses = statuses
class DiskList(msrest.serialization.Model):
    """Paged list of :class:`Disk` items.

    :param value: Required page of disks.
    :type value: list[Disk]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Disk]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["Disk"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DiskSku(msrest.serialization.Model):
    """Serialization model for a disk SKU.

    ``tier`` is read-only and starts out as ``None``.

    :param name: Serialized as ``name``; str or ``DiskStorageAccountTypes``.
    :type name: str or DiskStorageAccountTypes
    """

    _validation = {
        'tier': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[Union[str, "DiskStorageAccountTypes"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        # Read-only; populated by the service.
        self.tier = None
class DiskUpdate(msrest.serialization.Model):
    """Update payload for a managed disk.

    Every argument is an optional keyword; ``_attribute_map`` gives the
    wire name (including the flattened ``properties.*`` keys) for each.
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'DiskSku'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
        'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["DiskSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tags = tags
        self.sku = sku
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
class EncryptionSettingsCollection(msrest.serialization.Model):
    """Serialization model grouping encryption settings.

    :param enabled: Required. Serialized as ``enabled``.
    :type enabled: bool
    :param encryption_settings: Serialized as ``encryptionSettings``.
    :type encryption_settings: list[EncryptionSettingsElement]
    :param encryption_settings_version: Serialized as
     ``encryptionSettingsVersion``.
    :type encryption_settings_version: str
    """

    _validation = {
        'enabled': {'required': True},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'},
        'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        enabled: bool,
        encryption_settings: Optional[List["EncryptionSettingsElement"]] = None,
        encryption_settings_version: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.enabled = enabled
        self.encryption_settings = encryption_settings
        self.encryption_settings_version = encryption_settings_version
class EncryptionSettingsElement(msrest.serialization.Model):
    """One encryption-settings entry: disk key plus optional key-encryption key.

    :param disk_encryption_key: Serialized as ``diskEncryptionKey``.
    :type disk_encryption_key: KeyVaultAndSecretReference
    :param key_encryption_key: Serialized as ``keyEncryptionKey``.
    :type key_encryption_key: KeyVaultAndKeyReference
    """

    _attribute_map = {
        'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'},
    }

    def __init__(
        self,
        *,
        disk_encryption_key: Optional["KeyVaultAndSecretReference"] = None,
        key_encryption_key: Optional["KeyVaultAndKeyReference"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
class Gallery(Resource):
    """Serialization model for a gallery resource.

    Extends :class:`Resource`; adds an optional description and
    :class:`GalleryIdentifier`.  ``provisioning_state`` is read-only and
    starts out as ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'identifier': {'key': 'properties.identifier', 'type': 'GalleryIdentifier'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        identifier: Optional["GalleryIdentifier"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.description = description
        self.identifier = identifier
        # Read-only; populated by the service.
        self.provisioning_state = None
class GalleryApplication(Resource):
    """Serialization model for a gallery application resource.

    Extends :class:`Resource`; all application-specific properties
    (description, EULA, privacy/release URIs, end-of-life date, supported
    OS type) are optional keywords flattened under ``properties.*``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'eula': {'key': 'properties.eula', 'type': 'str'},
        'privacy_statement_uri': {'key': 'properties.privacyStatementUri', 'type': 'str'},
        'release_note_uri': {'key': 'properties.releaseNoteUri', 'type': 'str'},
        'end_of_life_date': {'key': 'properties.endOfLifeDate', 'type': 'iso-8601'},
        'supported_os_type': {'key': 'properties.supportedOSType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        eula: Optional[str] = None,
        privacy_statement_uri: Optional[str] = None,
        release_note_uri: Optional[str] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        supported_os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.description = description
        self.eula = eula
        self.privacy_statement_uri = privacy_statement_uri
        self.release_note_uri = release_note_uri
        self.end_of_life_date = end_of_life_date
        self.supported_os_type = supported_os_type
class GalleryApplicationList(msrest.serialization.Model):
    """Paged list of :class:`GalleryApplication` items.

    :param value: Required page of gallery applications.
    :type value: list[GalleryApplication]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GalleryApplication]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["GalleryApplication"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryApplicationVersion(Resource):
    """Serialization model for a gallery application version resource.

    Extends :class:`Resource`; takes an optional publishing profile.
    ``provisioning_state`` and ``replication_status`` are read-only and
    start out as ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'replication_status': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'publishing_profile': {'key': 'properties.publishingProfile', 'type': 'GalleryApplicationVersionPublishingProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'replication_status': {'key': 'properties.replicationStatus', 'type': 'ReplicationStatus'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        publishing_profile: Optional["GalleryApplicationVersionPublishingProfile"] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.publishing_profile = publishing_profile
        # Read-only fields: populated by the service.
        self.provisioning_state = None
        self.replication_status = None
class GalleryApplicationVersionList(msrest.serialization.Model):
    """Paged list of :class:`GalleryApplicationVersion` items.

    :param value: Required page of gallery application versions.
    :type value: list[GalleryApplicationVersion]
    :param next_link: URI of the next page, serialized as ``nextLink``.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GalleryApplicationVersion]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["GalleryApplicationVersion"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryArtifactPublishingProfileBase(msrest.serialization.Model):
    """Base serialization model for gallery artifact publishing profiles.

    All constructor arguments are optional keywords; ``published_date`` is
    read-only and starts out as ``None``.
    """

    _validation = {
        'published_date': {'readonly': True},
    }

    _attribute_map = {
        'target_regions': {'key': 'targetRegions', 'type': '[TargetRegion]'},
        'replica_count': {'key': 'replicaCount', 'type': 'int'},
        'exclude_from_latest': {'key': 'excludeFromLatest', 'type': 'bool'},
        'published_date': {'key': 'publishedDate', 'type': 'iso-8601'},
        'end_of_life_date': {'key': 'endOfLifeDate', 'type': 'iso-8601'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.target_regions = target_regions
        self.replica_count = replica_count
        self.exclude_from_latest = exclude_from_latest
        self.end_of_life_date = end_of_life_date
        self.storage_account_type = storage_account_type
        # Read-only; populated by the service.
        self.published_date = None
class GalleryApplicationVersionPublishingProfile(GalleryArtifactPublishingProfileBase):
    """Publishing profile for a gallery application version.

    Extends :class:`GalleryArtifactPublishingProfileBase` with a required
    ``source`` plus optional manage actions and a health-check flag.
    """

    _validation = {
        'published_date': {'readonly': True},
        'source': {'required': True},
    }

    _attribute_map = {
        'target_regions': {'key': 'targetRegions', 'type': '[TargetRegion]'},
        'replica_count': {'key': 'replicaCount', 'type': 'int'},
        'exclude_from_latest': {'key': 'excludeFromLatest', 'type': 'bool'},
        'published_date': {'key': 'publishedDate', 'type': 'iso-8601'},
        'end_of_life_date': {'key': 'endOfLifeDate', 'type': 'iso-8601'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'source': {'key': 'source', 'type': 'UserArtifactSource'},
        'manage_actions': {'key': 'manageActions', 'type': 'UserArtifactManage'},
        'enable_health_check': {'key': 'enableHealthCheck', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        source: "UserArtifactSource",
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        manage_actions: Optional["UserArtifactManage"] = None,
        enable_health_check: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(
            target_regions=target_regions,
            replica_count=replica_count,
            exclude_from_latest=exclude_from_latest,
            end_of_life_date=end_of_life_date,
            storage_account_type=storage_account_type,
            **kwargs
        )
        self.source = source
        self.manage_actions = manage_actions
        self.enable_health_check = enable_health_check
class GalleryArtifactSource(msrest.serialization.Model):
    """Serialization model wrapping a required ``managed_image`` reference.

    ``_attribute_map`` maps the attribute to the wire key ``managedImage``.
    """

    _validation = {
        'managed_image': {'required': True},
    }
    _attribute_map = {
        'managed_image': {'key': 'managedImage', 'type': 'ManagedArtifact'},
    }
    def __init__(
        self,
        *,
        managed_image: "ManagedArtifact",
        **kwargs
    ):
        """
        :param managed_image: Required managed artifact reference.
        """
        super(GalleryArtifactSource, self).__init__(**kwargs)
        self.managed_image = managed_image
class GalleryDiskImage(msrest.serialization.Model):
    """Base model for a gallery disk image.

    Both fields are read-only per ``_validation``: they are initialized to
    ``None`` here and are expected to be filled in during deserialization
    rather than by callers.
    """

    _validation = {
        'size_in_gb': {'readonly': True},
        'host_caching': {'readonly': True},
    }
    _attribute_map = {
        'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
        'host_caching': {'key': 'hostCaching', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(GalleryDiskImage, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.size_in_gb = None
        self.host_caching = None
class GalleryDataDiskImage(GalleryDiskImage):
    """Gallery data-disk image: adds a read-only ``lun`` to GalleryDiskImage.

    All three fields are read-only per ``_validation``; ``size_in_gb`` and
    ``host_caching`` are initialized by the base class.
    """

    _validation = {
        'size_in_gb': {'readonly': True},
        'host_caching': {'readonly': True},
        'lun': {'readonly': True},
    }
    _attribute_map = {
        'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
        'host_caching': {'key': 'hostCaching', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(GalleryDataDiskImage, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.lun = None
class GalleryIdentifier(msrest.serialization.Model):
    """Model holding a single read-only ``unique_name`` (wire key ``uniqueName``)."""

    _validation = {
        'unique_name': {'readonly': True},
    }
    _attribute_map = {
        'unique_name': {'key': 'uniqueName', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(GalleryIdentifier, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.unique_name = None
class GalleryImage(Resource):
    """Gallery image definition resource.

    Inherits the tracked-resource fields (``id``/``name``/``type``/
    ``location``/``tags``) from ``Resource``; ``location`` is required and
    ``provisioning_state`` is read-only. The remaining attributes are
    serialized under the ``properties.*`` wire keys listed in
    ``_attribute_map``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'eula': {'key': 'properties.eula', 'type': 'str'},
        'privacy_statement_uri': {'key': 'properties.privacyStatementUri', 'type': 'str'},
        'release_note_uri': {'key': 'properties.releaseNoteUri', 'type': 'str'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'os_state': {'key': 'properties.osState', 'type': 'str'},
        'end_of_life_date': {'key': 'properties.endOfLifeDate', 'type': 'iso-8601'},
        'identifier': {'key': 'properties.identifier', 'type': 'GalleryImageIdentifier'},
        'recommended': {'key': 'properties.recommended', 'type': 'RecommendedMachineConfiguration'},
        'disallowed': {'key': 'properties.disallowed', 'type': 'Disallowed'},
        'purchase_plan': {'key': 'properties.purchasePlan', 'type': 'ImagePurchasePlan'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        eula: Optional[str] = None,
        privacy_statement_uri: Optional[str] = None,
        release_note_uri: Optional[str] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        os_state: Optional[Union[str, "OperatingSystemStateTypes"]] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        identifier: Optional["GalleryImageIdentifier"] = None,
        recommended: Optional["RecommendedMachineConfiguration"] = None,
        disallowed: Optional["Disallowed"] = None,
        purchase_plan: Optional["ImagePurchasePlan"] = None,
        **kwargs
    ):
        """
        :param location: Required resource location (forwarded to ``Resource``).
        :param tags: Optional resource tags (forwarded to ``Resource``).

        All other parameters are optional property-bag fields stored verbatim
        on the instance.
        """
        super(GalleryImage, self).__init__(location=location, tags=tags, **kwargs)
        self.description = description
        self.eula = eula
        self.privacy_statement_uri = privacy_statement_uri
        self.release_note_uri = release_note_uri
        self.os_type = os_type
        self.os_state = os_state
        self.end_of_life_date = end_of_life_date
        self.identifier = identifier
        self.recommended = recommended
        self.disallowed = disallowed
        self.purchase_plan = purchase_plan
        # Read-only; populated from the service response, not by callers.
        self.provisioning_state = None
class GalleryImageIdentifier(msrest.serialization.Model):
    """Identifier triple for a gallery image.

    ``publisher``, ``offer`` and ``sku`` are all required per ``_validation``
    and serialize under the same-named wire keys.
    """

    _validation = {
        'publisher': {'required': True},
        'offer': {'required': True},
        'sku': {'required': True},
    }
    _attribute_map = {
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        publisher: str,
        offer: str,
        sku: str,
        **kwargs
    ):
        """
        :param publisher: Required publisher name.
        :param offer: Required offer name.
        :param sku: Required SKU name.
        """
        super(GalleryImageIdentifier, self).__init__(**kwargs)
        self.publisher = publisher
        self.offer = offer
        self.sku = sku
class GalleryImageList(msrest.serialization.Model):
    """Paged list of ``GalleryImage`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[GalleryImage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["GalleryImage"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of gallery images.
        :param next_link: Optional continuation link.
        """
        super(GalleryImageList, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryImageVersion(Resource):
    """Gallery image version resource.

    Callers may set ``location``, ``tags`` and ``publishing_profile``;
    ``provisioning_state``, ``storage_profile`` and ``replication_status``
    are read-only per ``_validation`` and initialized to ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'storage_profile': {'readonly': True},
        'replication_status': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'publishing_profile': {'key': 'properties.publishingProfile', 'type': 'GalleryImageVersionPublishingProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'GalleryImageVersionStorageProfile'},
        'replication_status': {'key': 'properties.replicationStatus', 'type': 'ReplicationStatus'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        publishing_profile: Optional["GalleryImageVersionPublishingProfile"] = None,
        **kwargs
    ):
        """
        :param location: Required resource location (forwarded to ``Resource``).
        :param tags: Optional resource tags (forwarded to ``Resource``).
        :param publishing_profile: Optional publishing profile.
        """
        super(GalleryImageVersion, self).__init__(location=location, tags=tags, **kwargs)
        self.publishing_profile = publishing_profile
        # Read-only; populated from the service response, not by callers.
        self.provisioning_state = None
        self.storage_profile = None
        self.replication_status = None
class GalleryImageVersionList(msrest.serialization.Model):
    """Paged list of ``GalleryImageVersion`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[GalleryImageVersion]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["GalleryImageVersion"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of gallery image versions.
        :param next_link: Optional continuation link.
        """
        super(GalleryImageVersionList, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryImageVersionPublishingProfile(GalleryArtifactPublishingProfileBase):
    """Publishing profile of a gallery image version.

    Extends the base publishing profile with a required ``source`` of type
    ``GalleryArtifactSource``; ``published_date`` is read-only per
    ``_validation`` and handled by the base class.
    """

    _validation = {
        'published_date': {'readonly': True},
        'source': {'required': True},
    }
    _attribute_map = {
        'target_regions': {'key': 'targetRegions', 'type': '[TargetRegion]'},
        'replica_count': {'key': 'replicaCount', 'type': 'int'},
        'exclude_from_latest': {'key': 'excludeFromLatest', 'type': 'bool'},
        'published_date': {'key': 'publishedDate', 'type': 'iso-8601'},
        'end_of_life_date': {'key': 'endOfLifeDate', 'type': 'iso-8601'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'source': {'key': 'source', 'type': 'GalleryArtifactSource'},
    }
    def __init__(
        self,
        *,
        source: "GalleryArtifactSource",
        target_regions: Optional[List["TargetRegion"]] = None,
        replica_count: Optional[int] = None,
        exclude_from_latest: Optional[bool] = None,
        end_of_life_date: Optional[datetime.datetime] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        **kwargs
    ):
        """
        :param source: Required gallery artifact source.

        Remaining keyword arguments are forwarded to the base publishing
        profile unchanged.
        """
        super(GalleryImageVersionPublishingProfile, self).__init__(target_regions=target_regions, replica_count=replica_count, exclude_from_latest=exclude_from_latest, end_of_life_date=end_of_life_date, storage_account_type=storage_account_type, **kwargs)
        self.source = source
class GalleryImageVersionStorageProfile(msrest.serialization.Model):
    """Storage profile of a gallery image version.

    Both ``os_disk_image`` and ``data_disk_images`` are read-only per
    ``_validation`` and initialized to ``None``.
    """

    _validation = {
        'os_disk_image': {'readonly': True},
        'data_disk_images': {'readonly': True},
    }
    _attribute_map = {
        'os_disk_image': {'key': 'osDiskImage', 'type': 'GalleryDiskImage'},
        'data_disk_images': {'key': 'dataDiskImages', 'type': '[GalleryDataDiskImage]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(GalleryImageVersionStorageProfile, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.os_disk_image = None
        self.data_disk_images = None
class GalleryList(msrest.serialization.Model):
    """Paged list of ``Gallery`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Gallery]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["Gallery"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of galleries.
        :param next_link: Optional continuation link.
        """
        super(GalleryList, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class GalleryOSDiskImage(GalleryDiskImage):
    """Gallery OS-disk image.

    Adds no fields beyond ``GalleryDiskImage``; it exists as a distinct wire
    type with the same read-only ``size_in_gb`` / ``host_caching`` pair.
    """

    _validation = {
        'size_in_gb': {'readonly': True},
        'host_caching': {'readonly': True},
    }
    _attribute_map = {
        'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
        'host_caching': {'key': 'hostCaching', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(GalleryOSDiskImage, self).__init__(**kwargs)
class GrantAccessData(msrest.serialization.Model):
    """Request payload for granting access.

    Both ``access`` (an ``AccessLevel`` value or string) and
    ``duration_in_seconds`` are required per ``_validation``.
    """

    _validation = {
        'access': {'required': True},
        'duration_in_seconds': {'required': True},
    }
    _attribute_map = {
        'access': {'key': 'access', 'type': 'str'},
        'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'},
    }
    def __init__(
        self,
        *,
        access: Union[str, "AccessLevel"],
        duration_in_seconds: int,
        **kwargs
    ):
        """
        :param access: Required access level.
        :param duration_in_seconds: Required duration, serialized as
            ``durationInSeconds``.
        """
        super(GrantAccessData, self).__init__(**kwargs)
        self.access = access
        self.duration_in_seconds = duration_in_seconds
class HardwareProfile(msrest.serialization.Model):
    """Model with a single optional ``vm_size`` (wire key ``vmSize``)."""

    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        vm_size: Optional[Union[str, "VirtualMachineSizeTypes"]] = None,
        **kwargs
    ):
        """
        :param vm_size: Optional VM size value or string.
        """
        super(HardwareProfile, self).__init__(**kwargs)
        self.vm_size = vm_size
class Image(Resource):
    """Virtual machine image resource.

    Callers may set ``location``, ``tags``, ``source_virtual_machine``,
    ``storage_profile`` and ``hyper_v_generation``; ``provisioning_state``
    is read-only per ``_validation`` and initialized to ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        """
        :param location: Required resource location (forwarded to ``Resource``).
        :param tags: Optional resource tags (forwarded to ``Resource``).
        :param source_virtual_machine: Optional source VM reference.
        :param storage_profile: Optional storage profile.
        :param hyper_v_generation: Optional Hyper-V generation value.
        """
        super(Image, self).__init__(location=location, tags=tags, **kwargs)
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Read-only; populated from the service response, not by callers.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class ImageDataDisk(msrest.serialization.Model):
    """Data-disk description within an image storage profile.

    Only ``lun`` is required per ``_validation``; every other field is an
    optional pass-through stored verbatim on the instance.
    """

    _validation = {
        'lun': {'required': True},
    }
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        lun: int,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        **kwargs
    ):
        """
        :param lun: Required logical unit number for the disk.
        """
        super(ImageDataDisk, self).__init__(**kwargs)
        self.lun = lun
        self.snapshot = snapshot
        self.managed_disk = managed_disk
        self.blob_uri = blob_uri
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.storage_account_type = storage_account_type
class ImageDiskReference(msrest.serialization.Model):
    """Reference to an image disk: required ``id`` plus optional ``lun``."""

    _validation = {
        'id': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
    }
    def __init__(
        self,
        *,
        id: str,
        lun: Optional[int] = None,
        **kwargs
    ):
        """
        :param id: Required resource id. (Parameter name shadows the builtin
            ``id``; kept as-is because it is part of the generated interface.)
        :param lun: Optional logical unit number.
        """
        super(ImageDiskReference, self).__init__(**kwargs)
        self.id = id
        self.lun = lun
class ImageListResult(msrest.serialization.Model):
    """Paged list of ``Image`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Image]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["Image"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of images.
        :param next_link: Optional continuation link.
        """
        super(ImageListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ImageOSDisk(msrest.serialization.Model):
    """OS-disk description within an image storage profile.

    ``os_type`` and ``os_state`` are required per ``_validation``; the rest
    are optional pass-through fields stored verbatim on the instance.
    """

    _validation = {
        'os_type': {'required': True},
        'os_state': {'required': True},
    }
    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'os_state': {'key': 'osState', 'type': 'str'},
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        os_type: Union[str, "OperatingSystemTypes"],
        os_state: Union[str, "OperatingSystemStateTypes"],
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        **kwargs
    ):
        """
        :param os_type: Required operating system type.
        :param os_state: Required operating system state.
        """
        super(ImageOSDisk, self).__init__(**kwargs)
        self.os_type = os_type
        self.os_state = os_state
        self.snapshot = snapshot
        self.managed_disk = managed_disk
        self.blob_uri = blob_uri
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.storage_account_type = storage_account_type
class ImagePurchasePlan(msrest.serialization.Model):
    """Purchase-plan triple for an image: optional ``name``/``publisher``/``product``."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        publisher: Optional[str] = None,
        product: Optional[str] = None,
        **kwargs
    ):
        super(ImagePurchasePlan, self).__init__(**kwargs)
        self.name = name
        self.publisher = publisher
        self.product = product
class SubResource(msrest.serialization.Model):
    """Minimal resource reference carrying only an optional ``id``.

    Base class for several reference types below (``ImageReference``,
    ``ManagedDiskParameters``, ``NetworkInterfaceReference``).
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :param id: Optional resource id. (Parameter name shadows the builtin
            ``id``; kept as-is because it is part of the generated interface.)
        """
        super(SubResource, self).__init__(**kwargs)
        self.id = id
class ImageReference(SubResource):
    """Image reference: ``SubResource`` id plus optional publisher/offer/sku/version."""

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        publisher: Optional[str] = None,
        offer: Optional[str] = None,
        sku: Optional[str] = None,
        version: Optional[str] = None,
        **kwargs
    ):
        """
        :param id: Optional resource id (forwarded to ``SubResource``).
        """
        super(ImageReference, self).__init__(id=id, **kwargs)
        self.publisher = publisher
        self.offer = offer
        self.sku = sku
        self.version = version
class ImageStorageProfile(msrest.serialization.Model):
    """Storage profile for an image: optional OS disk, data disks, and zone-resilience flag."""

    _attribute_map = {
        'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
        'zone_resilient': {'key': 'zoneResilient', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        os_disk: Optional["ImageOSDisk"] = None,
        data_disks: Optional[List["ImageDataDisk"]] = None,
        zone_resilient: Optional[bool] = None,
        **kwargs
    ):
        super(ImageStorageProfile, self).__init__(**kwargs)
        self.os_disk = os_disk
        self.data_disks = data_disks
        self.zone_resilient = zone_resilient
class ImageUpdate(UpdateResource):
    """PATCH-style update model for ``Image``.

    Mirrors the updatable subset of ``Image``; ``provisioning_state`` is
    read-only per ``_validation`` and initialized to ``None``.
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        """
        :param tags: Optional tags (forwarded to ``UpdateResource``).
        """
        super(ImageUpdate, self).__init__(tags=tags, **kwargs)
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Read-only; populated from the service response, not by callers.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class InnerError(msrest.serialization.Model):
    """Inner error details: optional ``exceptiontype`` and ``errordetail`` strings."""

    _attribute_map = {
        'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
        'errordetail': {'key': 'errordetail', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        exceptiontype: Optional[str] = None,
        errordetail: Optional[str] = None,
        **kwargs
    ):
        super(InnerError, self).__init__(**kwargs)
        self.exceptiontype = exceptiontype
        self.errordetail = errordetail
class InstanceViewStatus(msrest.serialization.Model):
    """Instance-view status entry.

    All fields are optional; ``time`` serializes as ISO-8601 per
    ``_attribute_map`` and ``level`` accepts a ``StatusLevelTypes`` value
    or string.
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'level': {'key': 'level', 'type': 'str'},
        'display_status': {'key': 'displayStatus', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        level: Optional[Union[str, "StatusLevelTypes"]] = None,
        display_status: Optional[str] = None,
        message: Optional[str] = None,
        time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super(InstanceViewStatus, self).__init__(**kwargs)
        self.code = code
        self.level = level
        self.display_status = display_status
        self.message = message
        self.time = time
class KeyVaultAndKeyReference(msrest.serialization.Model):
    """Key Vault key reference: required ``source_vault`` (a ``SourceVault``) and ``key_url``."""

    _validation = {
        'source_vault': {'required': True},
        'key_url': {'required': True},
    }
    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
        'key_url': {'key': 'keyUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        source_vault: "SourceVault",
        key_url: str,
        **kwargs
    ):
        """
        :param source_vault: Required vault reference.
        :param key_url: Required key URL, serialized as ``keyUrl``.
        """
        super(KeyVaultAndKeyReference, self).__init__(**kwargs)
        self.source_vault = source_vault
        self.key_url = key_url
class KeyVaultAndSecretReference(msrest.serialization.Model):
    """Key Vault secret reference: required ``source_vault`` (a ``SourceVault``) and ``secret_url``."""

    _validation = {
        'source_vault': {'required': True},
        'secret_url': {'required': True},
    }
    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        source_vault: "SourceVault",
        secret_url: str,
        **kwargs
    ):
        """
        :param source_vault: Required vault reference.
        :param secret_url: Required secret URL, serialized as ``secretUrl``.
        """
        super(KeyVaultAndSecretReference, self).__init__(**kwargs)
        self.source_vault = source_vault
        self.secret_url = secret_url
class KeyVaultKeyReference(msrest.serialization.Model):
    """Key Vault key reference variant whose vault is a ``SubResource``.

    Note the near-duplicate ``KeyVaultAndKeyReference`` above uses a
    ``SourceVault`` instead — the two are distinct wire types.
    """

    _validation = {
        'key_url': {'required': True},
        'source_vault': {'required': True},
    }
    _attribute_map = {
        'key_url': {'key': 'keyUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        key_url: str,
        source_vault: "SubResource",
        **kwargs
    ):
        """
        :param key_url: Required key URL, serialized as ``keyUrl``.
        :param source_vault: Required vault sub-resource reference.
        """
        super(KeyVaultKeyReference, self).__init__(**kwargs)
        self.key_url = key_url
        self.source_vault = source_vault
class KeyVaultSecretReference(msrest.serialization.Model):
    """Key Vault secret reference variant whose vault is a ``SubResource``.

    Note the near-duplicate ``KeyVaultAndSecretReference`` above uses a
    ``SourceVault`` instead — the two are distinct wire types.
    """

    _validation = {
        'secret_url': {'required': True},
        'source_vault': {'required': True},
    }
    _attribute_map = {
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        secret_url: str,
        source_vault: "SubResource",
        **kwargs
    ):
        """
        :param secret_url: Required secret URL, serialized as ``secretUrl``.
        :param source_vault: Required vault sub-resource reference.
        """
        super(KeyVaultSecretReference, self).__init__(**kwargs)
        self.secret_url = secret_url
        self.source_vault = source_vault
class LinuxConfiguration(msrest.serialization.Model):
    """Linux OS configuration: password-auth toggle, SSH config, VM-agent flag.

    All fields optional; ``provision_vm_agent`` serializes as
    ``provisionVMAgent`` per ``_attribute_map``.
    """

    _attribute_map = {
        'disable_password_authentication': {'key': 'disablePasswordAuthentication', 'type': 'bool'},
        'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        disable_password_authentication: Optional[bool] = None,
        ssh: Optional["SshConfiguration"] = None,
        provision_vm_agent: Optional[bool] = None,
        **kwargs
    ):
        super(LinuxConfiguration, self).__init__(**kwargs)
        self.disable_password_authentication = disable_password_authentication
        self.ssh = ssh
        self.provision_vm_agent = provision_vm_agent
class ListUsagesResult(msrest.serialization.Model):
    """Paged list of ``Usage`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["Usage"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of usage entries.
        :param next_link: Optional continuation link.
        """
        super(ListUsagesResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class LogAnalyticsInputBase(msrest.serialization.Model):
    """Base request model for log-analytics operations.

    ``blob_container_sas_uri``, ``from_time`` and ``to_time`` are required
    per ``_validation``; the three ``group_by_*`` booleans are optional.
    Datetimes serialize as ISO-8601 per ``_attribute_map``.
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }
    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        **kwargs
    ):
        """
        :param blob_container_sas_uri: Required SAS URI of the output blob
            container.
        :param from_time: Required start of the queried interval.
        :param to_time: Required end of the queried interval.
        """
        super(LogAnalyticsInputBase, self).__init__(**kwargs)
        self.blob_container_sas_uri = blob_container_sas_uri
        self.from_time = from_time
        self.to_time = to_time
        self.group_by_throttle_policy = group_by_throttle_policy
        self.group_by_operation_name = group_by_operation_name
        self.group_by_resource_name = group_by_resource_name
class LogAnalyticsOperationResult(msrest.serialization.Model):
    """Result wrapper whose single ``properties`` field is read-only."""

    _validation = {
        'properties': {'readonly': True},
    }
    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'LogAnalyticsOutput'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(LogAnalyticsOperationResult, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.properties = None
class LogAnalyticsOutput(msrest.serialization.Model):
    """Model holding a single read-only string ``output``."""

    _validation = {
        'output': {'readonly': True},
    }
    _attribute_map = {
        'output': {'key': 'output', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(LogAnalyticsOutput, self).__init__(**kwargs)
        # Read-only; populated from the service response, not by callers.
        self.output = None
class MaintenanceRedeployStatus(msrest.serialization.Model):
    """Maintenance/redeploy status for a VM.

    All fields are optional pass-throughs; the window datetimes serialize as
    ISO-8601 and ``last_operation_result_code`` accepts a
    ``MaintenanceOperationResultCodeTypes`` value or string.
    """

    _attribute_map = {
        'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
        'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
        'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
        'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
        'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
        'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'str'},
        'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        is_customer_initiated_maintenance_allowed: Optional[bool] = None,
        pre_maintenance_window_start_time: Optional[datetime.datetime] = None,
        pre_maintenance_window_end_time: Optional[datetime.datetime] = None,
        maintenance_window_start_time: Optional[datetime.datetime] = None,
        maintenance_window_end_time: Optional[datetime.datetime] = None,
        last_operation_result_code: Optional[Union[str, "MaintenanceOperationResultCodeTypes"]] = None,
        last_operation_message: Optional[str] = None,
        **kwargs
    ):
        super(MaintenanceRedeployStatus, self).__init__(**kwargs)
        self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
        self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
        self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
        self.maintenance_window_start_time = maintenance_window_start_time
        self.maintenance_window_end_time = maintenance_window_end_time
        self.last_operation_result_code = last_operation_result_code
        self.last_operation_message = last_operation_message
class ManagedArtifact(msrest.serialization.Model):
    """Managed artifact reference with a single required ``id``."""

    _validation = {
        'id': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: str,
        **kwargs
    ):
        """
        :param id: Required resource id. (Parameter name shadows the builtin
            ``id``; kept as-is because it is part of the generated interface.)
        """
        super(ManagedArtifact, self).__init__(**kwargs)
        self.id = id
class ManagedDiskParameters(SubResource):
    """Managed-disk parameters: ``SubResource`` id plus optional ``storage_account_type``."""

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        **kwargs
    ):
        """
        :param id: Optional resource id (forwarded to ``SubResource``).
        :param storage_account_type: Optional storage account type value.
        """
        super(ManagedDiskParameters, self).__init__(id=id, **kwargs)
        self.storage_account_type = storage_account_type
class NetworkInterfaceReference(SubResource):
    """NIC reference: ``SubResource`` id plus optional ``primary`` flag.

    ``primary`` serializes under the nested key ``properties.primary``.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        **kwargs
    ):
        """
        :param id: Optional resource id (forwarded to ``SubResource``).
        :param primary: Optional primary-NIC flag.
        """
        super(NetworkInterfaceReference, self).__init__(id=id, **kwargs)
        self.primary = primary
class NetworkProfile(msrest.serialization.Model):
    """Network profile holding an optional list of NIC references."""

    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterfaceReference]'},
    }
    def __init__(
        self,
        *,
        network_interfaces: Optional[List["NetworkInterfaceReference"]] = None,
        **kwargs
    ):
        super(NetworkProfile, self).__init__(**kwargs)
        self.network_interfaces = network_interfaces
class OSDisk(msrest.serialization.Model):
    """OS-disk specification for a virtual machine.

    Only ``create_option`` is required per ``_validation``; all other fields
    are optional pass-throughs. Note the attribute assignment order below
    matches the generator's output, which interleaves ``create_option``
    between the optional fields.
    """

    _validation = {
        'create_option': {'required': True},
    }
    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': 'DiskEncryptionSettings'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
    }
    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        encryption_settings: Optional["DiskEncryptionSettings"] = None,
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        **kwargs
    ):
        """
        :param create_option: Required disk create option, serialized as
            ``createOption``.
        """
        super(OSDisk, self).__init__(**kwargs)
        self.os_type = os_type
        self.encryption_settings = encryption_settings
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.diff_disk_settings = diff_disk_settings
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
class OSDiskImage(msrest.serialization.Model):
    """OS-disk image with a single required ``operating_system`` field."""

    _validation = {
        'operating_system': {'required': True},
    }
    _attribute_map = {
        'operating_system': {'key': 'operatingSystem', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        operating_system: Union[str, "OperatingSystemTypes"],
        **kwargs
    ):
        """
        :param operating_system: Required OS type, serialized as
            ``operatingSystem``.
        """
        super(OSDiskImage, self).__init__(**kwargs)
        self.operating_system = operating_system
class OSProfile(msrest.serialization.Model):
    """OS profile for a virtual machine.

    All fields are optional pass-throughs: computer name, admin credentials,
    custom data, per-OS configuration blocks, vault secrets, and two boolean
    extension/provisioning flags (see ``_attribute_map`` for wire keys).
    """

    _attribute_map = {
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
        'allow_extension_operations': {'key': 'allowExtensionOperations', 'type': 'bool'},
        'require_guest_provision_signal': {'key': 'requireGuestProvisionSignal', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        computer_name: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        require_guest_provision_signal: Optional[bool] = None,
        **kwargs
    ):
        super(OSProfile, self).__init__(**kwargs)
        self.computer_name = computer_name
        self.admin_username = admin_username
        # NOTE(review): admin_password is held in plain text on the instance,
        # as generated; handle instances accordingly.
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
        self.allow_extension_operations = allow_extension_operations
        self.require_guest_provision_signal = require_guest_provision_signal
class Plan(msrest.serialization.Model):
    """Marketplace plan: optional ``name``/``publisher``/``product``/``promotion_code``."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
        'promotion_code': {'key': 'promotionCode', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        publisher: Optional[str] = None,
        product: Optional[str] = None,
        promotion_code: Optional[str] = None,
        **kwargs
    ):
        super(Plan, self).__init__(**kwargs)
        self.name = name
        self.publisher = publisher
        self.product = product
        self.promotion_code = promotion_code
class ProximityPlacementGroup(Resource):
    """Proximity placement group resource.

    Callers may set ``location``, ``tags`` and
    ``proximity_placement_group_type``; the three membership lists
    (``virtual_machines``, ``virtual_machine_scale_sets``,
    ``availability_sets``) are read-only per ``_validation`` and
    initialized to ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'virtual_machines': {'readonly': True},
        'virtual_machine_scale_sets': {'readonly': True},
        'availability_sets': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'proximity_placement_group_type': {'key': 'properties.proximityPlacementGroupType', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'virtual_machine_scale_sets': {'key': 'properties.virtualMachineScaleSets', 'type': '[SubResource]'},
        'availability_sets': {'key': 'properties.availabilitySets', 'type': '[SubResource]'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        proximity_placement_group_type: Optional[Union[str, "ProximityPlacementGroupType"]] = None,
        **kwargs
    ):
        """
        :param location: Required resource location (forwarded to ``Resource``).
        :param tags: Optional resource tags (forwarded to ``Resource``).
        :param proximity_placement_group_type: Optional group type value.
        """
        super(ProximityPlacementGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.proximity_placement_group_type = proximity_placement_group_type
        # Read-only; populated from the service response, not by callers.
        self.virtual_machines = None
        self.virtual_machine_scale_sets = None
        self.availability_sets = None
class ProximityPlacementGroupListResult(msrest.serialization.Model):
    """Paged list of ``ProximityPlacementGroup`` objects.

    ``value`` is required; ``next_link`` (wire key ``nextLink``) is optional
    and, when set, presumably references the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ProximityPlacementGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["ProximityPlacementGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required list of proximity placement groups.
        :param next_link: Optional continuation link.
        """
        super(ProximityPlacementGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ProximityPlacementGroupUpdate(UpdateResource):
    """PATCH payload for a proximity placement group; only tags may change."""

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :param tags: Optional resource tags; forwarded to UpdateResource.
        """
        super(ProximityPlacementGroupUpdate, self).__init__(tags=tags, **kwargs)
class PurchasePlan(msrest.serialization.Model):
    """Marketplace purchase plan identity (publisher/name/product).

    All three fields are required per ``_validation``.
    """

    _validation = {
        'publisher': {'required': True},
        'name': {'required': True},
        'product': {'required': True},
    }

    _attribute_map = {
        'publisher': {'key': 'publisher', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        publisher: str,
        name: str,
        product: str,
        **kwargs
    ):
        """
        :param publisher: Required. Publisher id.
        :param name: Required. Plan id.
        :param product: Required. Product/offer id.
        """
        super(PurchasePlan, self).__init__(**kwargs)
        self.publisher = publisher
        self.name = name
        self.product = product
class RecommendedMachineConfiguration(msrest.serialization.Model):
    """Recommended machine sizing: vCPU and memory ranges (both optional)."""

    _attribute_map = {
        # Wire key is 'vCPUs'; the generated Python name splits it oddly.
        'v_cp_us': {'key': 'vCPUs', 'type': 'ResourceRange'},
        'memory': {'key': 'memory', 'type': 'ResourceRange'},
    }

    def __init__(
        self,
        *,
        v_cp_us: Optional["ResourceRange"] = None,
        memory: Optional["ResourceRange"] = None,
        **kwargs
    ):
        """
        :param v_cp_us: Optional recommended vCPU range.
        :param memory: Optional recommended memory range.
        """
        super(RecommendedMachineConfiguration, self).__init__(**kwargs)
        self.v_cp_us = v_cp_us
        self.memory = memory
class RecoveryWalkResponse(msrest.serialization.Model):
    """Response of a recovery walk operation.

    Both fields are read-only: the service populates them; ``__init__`` only
    initializes them to ``None``.
    """

    _validation = {
        'walk_performed': {'readonly': True},
        'next_platform_update_domain': {'readonly': True},
    }

    _attribute_map = {
        'walk_performed': {'key': 'walkPerformed', 'type': 'bool'},
        'next_platform_update_domain': {'key': 'nextPlatformUpdateDomain', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RecoveryWalkResponse, self).__init__(**kwargs)
        self.walk_performed = None
        self.next_platform_update_domain = None
class RegionalReplicationStatus(msrest.serialization.Model):
    """Per-region replication status of a gallery image version.

    All fields are read-only server output; ``__init__`` initializes them to
    ``None``.
    """

    _validation = {
        'region': {'readonly': True},
        'state': {'readonly': True},
        'details': {'readonly': True},
        'progress': {'readonly': True},
    }

    _attribute_map = {
        'region': {'key': 'region', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'details': {'key': 'details', 'type': 'str'},
        'progress': {'key': 'progress', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RegionalReplicationStatus, self).__init__(**kwargs)
        self.region = None
        self.state = None
        self.details = None
        self.progress = None
class ReplicationStatus(msrest.serialization.Model):
    """Aggregate replication status plus per-region summary (all read-only)."""

    _validation = {
        'aggregated_state': {'readonly': True},
        'summary': {'readonly': True},
    }

    _attribute_map = {
        'aggregated_state': {'key': 'aggregatedState', 'type': 'str'},
        'summary': {'key': 'summary', 'type': '[RegionalReplicationStatus]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ReplicationStatus, self).__init__(**kwargs)
        self.aggregated_state = None
        self.summary = None
class RequestRateByIntervalInput(LogAnalyticsInputBase):
    """Log Analytics request for request-rate-by-interval data.

    Extends LogAnalyticsInputBase with a required ``interval_length``; the
    base-class fields (SAS URI, time window, group-by flags) are forwarded to
    ``super().__init__``.
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
        'interval_length': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'interval_length': {'key': 'intervalLength', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        interval_length: Union[str, "IntervalInMins"],
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        **kwargs
    ):
        """
        :param blob_container_sas_uri: Required. SAS URI of the output blob container.
        :param from_time: Required. Start of the query window.
        :param to_time: Required. End of the query window.
        :param interval_length: Required. Interval size (string or IntervalInMins enum).
        :param group_by_throttle_policy: Optional grouping flag.
        :param group_by_operation_name: Optional grouping flag.
        :param group_by_resource_name: Optional grouping flag.
        """
        super(RequestRateByIntervalInput, self).__init__(blob_container_sas_uri=blob_container_sas_uri, from_time=from_time, to_time=to_time, group_by_throttle_policy=group_by_throttle_policy, group_by_operation_name=group_by_operation_name, group_by_resource_name=group_by_resource_name, **kwargs)
        self.interval_length = interval_length
class ResourceRange(msrest.serialization.Model):
    """Inclusive integer range (min/max), both endpoints optional.

    NOTE: the parameter names ``min``/``max`` shadow the builtins; they are
    kept as-is because they are part of the generated wire contract.
    """

    _attribute_map = {
        'min': {'key': 'min', 'type': 'int'},
        'max': {'key': 'max', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        min: Optional[int] = None,
        max: Optional[int] = None,
        **kwargs
    ):
        """
        :param min: Optional lower bound of the range.
        :param max: Optional upper bound of the range.
        """
        super(ResourceRange, self).__init__(**kwargs)
        self.min = min
        self.max = max
class RollbackStatusInfo(msrest.serialization.Model):
    """Rollback outcome of an OS upgrade: success/failure counts and error.

    All fields are read-only server output.
    """

    _validation = {
        'successfully_rolledback_instance_count': {'readonly': True},
        'failed_rolledback_instance_count': {'readonly': True},
        'rollback_error': {'readonly': True},
    }

    _attribute_map = {
        'successfully_rolledback_instance_count': {'key': 'successfullyRolledbackInstanceCount', 'type': 'int'},
        'failed_rolledback_instance_count': {'key': 'failedRolledbackInstanceCount', 'type': 'int'},
        'rollback_error': {'key': 'rollbackError', 'type': 'ApiError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollbackStatusInfo, self).__init__(**kwargs)
        self.successfully_rolledback_instance_count = None
        self.failed_rolledback_instance_count = None
        self.rollback_error = None
class RollingUpgradePolicy(msrest.serialization.Model):
    """Rolling-upgrade configuration for a scale set.

    The percent fields are validated to 5-100 (0-100 for the upgraded-instance
    limit) by the ``_validation`` constraints; all parameters are optional.
    """

    _validation = {
        'max_batch_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_upgraded_instance_percent': {'maximum': 100, 'minimum': 0},
    }

    _attribute_map = {
        'max_batch_instance_percent': {'key': 'maxBatchInstancePercent', 'type': 'int'},
        'max_unhealthy_instance_percent': {'key': 'maxUnhealthyInstancePercent', 'type': 'int'},
        'max_unhealthy_upgraded_instance_percent': {'key': 'maxUnhealthyUpgradedInstancePercent', 'type': 'int'},
        'pause_time_between_batches': {'key': 'pauseTimeBetweenBatches', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        max_batch_instance_percent: Optional[int] = None,
        max_unhealthy_instance_percent: Optional[int] = None,
        max_unhealthy_upgraded_instance_percent: Optional[int] = None,
        pause_time_between_batches: Optional[str] = None,
        **kwargs
    ):
        """
        :param max_batch_instance_percent: Optional batch size limit in percent (5-100).
        :param max_unhealthy_instance_percent: Optional unhealthy-instance limit (5-100).
        :param max_unhealthy_upgraded_instance_percent: Optional limit for upgraded instances (0-100).
        :param pause_time_between_batches: Optional pause between batches
            (string; presumably ISO-8601 duration -- confirm against service docs).
        """
        super(RollingUpgradePolicy, self).__init__(**kwargs)
        self.max_batch_instance_percent = max_batch_instance_percent
        self.max_unhealthy_instance_percent = max_unhealthy_instance_percent
        self.max_unhealthy_upgraded_instance_percent = max_unhealthy_upgraded_instance_percent
        self.pause_time_between_batches = pause_time_between_batches
class RollingUpgradeProgressInfo(msrest.serialization.Model):
    """Instance counts for a rolling upgrade in progress (all read-only)."""

    _validation = {
        'successful_instance_count': {'readonly': True},
        'failed_instance_count': {'readonly': True},
        'in_progress_instance_count': {'readonly': True},
        'pending_instance_count': {'readonly': True},
    }

    _attribute_map = {
        'successful_instance_count': {'key': 'successfulInstanceCount', 'type': 'int'},
        'failed_instance_count': {'key': 'failedInstanceCount', 'type': 'int'},
        'in_progress_instance_count': {'key': 'inProgressInstanceCount', 'type': 'int'},
        'pending_instance_count': {'key': 'pendingInstanceCount', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeProgressInfo, self).__init__(**kwargs)
        self.successful_instance_count = None
        self.failed_instance_count = None
        self.in_progress_instance_count = None
        self.pending_instance_count = None
class RollingUpgradeRunningStatus(msrest.serialization.Model):
    """Current running state of a rolling upgrade (all fields read-only)."""

    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'last_action': {'readonly': True},
        'last_action_time': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_action': {'key': 'lastAction', 'type': 'str'},
        'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeRunningStatus, self).__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.last_action = None
        self.last_action_time = None
class RollingUpgradeStatusInfo(Resource):
    """Tracked resource holding the latest rolling-upgrade status.

    Only ``location``/``tags`` are client-settable (forwarded to Resource);
    policy, running_status, progress and error are server-populated and are
    initialized to ``None``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'policy': {'readonly': True},
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'policy': {'key': 'properties.policy', 'type': 'RollingUpgradePolicy'},
        'running_status': {'key': 'properties.runningStatus', 'type': 'RollingUpgradeRunningStatus'},
        'progress': {'key': 'properties.progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'properties.error', 'type': 'ApiError'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :param location: Required. Resource location; forwarded to Resource.
        :param tags: Optional resource tags; forwarded to Resource.
        """
        super(RollingUpgradeStatusInfo, self).__init__(location=location, tags=tags, **kwargs)
        self.policy = None
        self.running_status = None
        self.progress = None
        self.error = None
class RunCommandDocumentBase(msrest.serialization.Model):
    """Metadata of a Run Command document (no script body).

    All five fields are required. Note the ``schema`` attribute serializes to
    the JSON key ``$schema``.
    """

    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
    }

    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        **kwargs
    ):
        """
        :param schema: Required. Document schema URI (wire key ``$schema``).
        :param id: Required. Command id (shadows builtin ``id`` by generated design).
        :param os_type: Required. Target OS (string or OperatingSystemTypes enum).
        :param label: Required. Display label.
        :param description: Required. Command description.
        """
        super(RunCommandDocumentBase, self).__init__(**kwargs)
        self.schema = schema
        self.id = id
        self.os_type = os_type
        self.label = label
        self.description = description
class RunCommandDocument(RunCommandDocumentBase):
    """Full Run Command document: base metadata plus the script and parameters.

    ``script`` is required in addition to the base-class required fields;
    ``parameters`` is optional.
    """

    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
        'script': {'required': True},
    }

    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandParameterDefinition]'},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        script: List[str],
        parameters: Optional[List["RunCommandParameterDefinition"]] = None,
        **kwargs
    ):
        """
        :param schema: Required. Document schema URI; forwarded to the base class.
        :param id: Required. Command id; forwarded to the base class.
        :param os_type: Required. Target OS; forwarded to the base class.
        :param label: Required. Display label; forwarded to the base class.
        :param description: Required. Description; forwarded to the base class.
        :param script: Required. Script lines.
        :param parameters: Optional parameter definitions for the script.
        """
        super(RunCommandDocument, self).__init__(schema=schema, id=id, os_type=os_type, label=label, description=description, **kwargs)
        self.script = script
        self.parameters = parameters
class RunCommandInput(msrest.serialization.Model):
    """Request body for running a command on a VM.

    ``command_id`` is required; ``script`` (overrides the command's default
    script) and ``parameters`` are optional.
    """

    _validation = {
        'command_id': {'required': True},
    }

    _attribute_map = {
        'command_id': {'key': 'commandId', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandInputParameter]'},
    }

    def __init__(
        self,
        *,
        command_id: str,
        script: Optional[List[str]] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        **kwargs
    ):
        """
        :param command_id: Required. Id of the command to run.
        :param script: Optional script lines.
        :param parameters: Optional run parameters.
        """
        super(RunCommandInput, self).__init__(**kwargs)
        self.command_id = command_id
        self.script = script
        self.parameters = parameters
class RunCommandInputParameter(msrest.serialization.Model):
    """A single name/value parameter passed to a Run Command (both required)."""

    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        value: str,
        **kwargs
    ):
        """
        :param name: Required. Parameter name.
        :param value: Required. Parameter value.
        """
        super(RunCommandInputParameter, self).__init__(**kwargs)
        self.name = name
        self.value = value
class RunCommandListResult(msrest.serialization.Model):
    """One page of available Run Command documents.

    ``value`` is required; ``next_link`` points at the next page, if any.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[RunCommandDocumentBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["RunCommandDocumentBase"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required. Documents on this page.
        :param next_link: Optional URI of the next page.
        """
        super(RunCommandListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class RunCommandParameterDefinition(msrest.serialization.Model):
    """Definition of one Run Command script parameter.

    ``name`` and ``type`` are required; ``required`` defaults to ``False``
    (note: ``type`` shadows the builtin by generated design).
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'default_value': {'key': 'defaultValue', 'type': 'str'},
        'required': {'key': 'required', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        name: str,
        type: str,
        default_value: Optional[str] = None,
        required: Optional[bool] = False,
        **kwargs
    ):
        """
        :param name: Required. Parameter name.
        :param type: Required. Parameter type string.
        :param default_value: Optional default value.
        :param required: Whether the parameter must be supplied; defaults to False.
        """
        super(RunCommandParameterDefinition, self).__init__(**kwargs)
        self.name = name
        self.type = type
        self.default_value = default_value
        self.required = required
class RunCommandResult(msrest.serialization.Model):
    """Result of a Run Command: an optional list of instance-view statuses."""

    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :param value: Optional execution status entries.
        """
        super(RunCommandResult, self).__init__(**kwargs)
        self.value = value
class ScaleInPolicy(msrest.serialization.Model):
    """Scale-in rule list for a VM scale set.

    ``rules`` entries are strings or VirtualMachineScaleSetScaleInRules enum
    values; the whole list is optional.
    """

    _attribute_map = {
        'rules': {'key': 'rules', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        rules: Optional[List[Union[str, "VirtualMachineScaleSetScaleInRules"]]] = None,
        **kwargs
    ):
        """
        :param rules: Optional ordered scale-in rules.
        """
        super(ScaleInPolicy, self).__init__(**kwargs)
        self.rules = rules
class ScheduledEventsProfile(msrest.serialization.Model):
    """Scheduled-events configuration; wraps an optional terminate profile."""

    _attribute_map = {
        'terminate_notification_profile': {'key': 'terminateNotificationProfile', 'type': 'TerminateNotificationProfile'},
    }

    def __init__(
        self,
        *,
        terminate_notification_profile: Optional["TerminateNotificationProfile"] = None,
        **kwargs
    ):
        """
        :param terminate_notification_profile: Optional terminate-event settings.
        """
        super(ScheduledEventsProfile, self).__init__(**kwargs)
        self.terminate_notification_profile = terminate_notification_profile
class Sku(msrest.serialization.Model):
    """SKU descriptor: name, tier and capacity (all optional).

    ``capacity`` serializes as a 64-bit integer ('long' wire type).
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'long'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        tier: Optional[str] = None,
        capacity: Optional[int] = None,
        **kwargs
    ):
        """
        :param name: Optional SKU name.
        :param tier: Optional SKU tier.
        :param capacity: Optional instance count.
        """
        super(Sku, self).__init__(**kwargs)
        self.name = name
        self.tier = tier
        self.capacity = capacity
class Snapshot(Resource):
    """Tracked resource describing a managed-disk snapshot.

    Read-only fields (managed_by, time_created, disk_size_bytes, unique_id,
    provisioning_state, plus id/name/type from Resource) are server-populated
    and initialized to ``None``; the rest are client-settable.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'managed_by': {'readonly': True},
        'time_created': {'readonly': True},
        'disk_size_bytes': {'readonly': True},
        'unique_id': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # 'properties.*' keys are flattened from the nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed_by': {'key': 'managedBy', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'SnapshotSku'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
        'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'incremental': {'key': 'properties.incremental', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["SnapshotSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
        creation_data: Optional["CreationData"] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        incremental: Optional[bool] = None,
        **kwargs
    ):
        """
        :param location: Required. Resource location; forwarded to Resource.
        :param tags: Optional resource tags; forwarded to Resource.
        :param sku: Optional snapshot SKU.
        :param os_type: Optional OS type (string or OperatingSystemTypes enum).
        :param hyper_v_generation: Optional Hyper-V generation.
        :param creation_data: Optional creation source information.
        :param disk_size_gb: Optional disk size in GB.
        :param encryption_settings_collection: Optional encryption settings.
        :param incremental: Optional flag for incremental snapshots.
        """
        super(Snapshot, self).__init__(location=location, tags=tags, **kwargs)
        self.managed_by = None
        self.sku = sku
        self.time_created = None
        self.os_type = os_type
        self.hyper_v_generation = hyper_v_generation
        self.creation_data = creation_data
        self.disk_size_gb = disk_size_gb
        self.disk_size_bytes = None
        self.unique_id = None
        self.encryption_settings_collection = encryption_settings_collection
        self.provisioning_state = None
        self.incremental = incremental
class SnapshotList(msrest.serialization.Model):
    """One page of a snapshot listing: required ``value`` plus ``next_link``."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Snapshot]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["Snapshot"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Required. Snapshots on this page.
        :param next_link: Optional URI of the next page.
        """
        super(SnapshotList, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SnapshotSku(msrest.serialization.Model):
    """Snapshot SKU: client sets ``name``; ``tier`` is server-populated."""

    _validation = {
        'tier': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[Union[str, "SnapshotStorageAccountTypes"]] = None,
        **kwargs
    ):
        """
        :param name: Optional SKU name (string or SnapshotStorageAccountTypes enum).
        """
        super(SnapshotSku, self).__init__(**kwargs)
        self.name = name
        self.tier = None
class SnapshotUpdate(msrest.serialization.Model):
    """PATCH payload for a snapshot; every field is optional."""

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'SnapshotSku'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["SnapshotSku"] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
        **kwargs
    ):
        """
        :param tags: Optional resource tags.
        :param sku: Optional snapshot SKU.
        :param os_type: Optional OS type.
        :param disk_size_gb: Optional new disk size in GB.
        :param encryption_settings_collection: Optional encryption settings.
        """
        super(SnapshotUpdate, self).__init__(**kwargs)
        self.tags = tags
        self.sku = sku
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.encryption_settings_collection = encryption_settings_collection
class SourceVault(msrest.serialization.Model):
    """Reference to a key vault by resource id (optional)."""

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :param id: Optional vault resource id (shadows builtin ``id`` by generated design).
        """
        super(SourceVault, self).__init__(**kwargs)
        self.id = id
class SshConfiguration(msrest.serialization.Model):
    """SSH configuration: an optional list of public keys."""

    _attribute_map = {
        'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'},
    }

    def __init__(
        self,
        *,
        public_keys: Optional[List["SshPublicKey"]] = None,
        **kwargs
    ):
        """
        :param public_keys: Optional SSH public keys to provision.
        """
        super(SshConfiguration, self).__init__(**kwargs)
        self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
    """An SSH public key plus the file path it should be placed at."""

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        path: Optional[str] = None,
        key_data: Optional[str] = None,
        **kwargs
    ):
        """
        :param path: Optional destination path on the VM.
        :param key_data: Optional public key data.
        """
        super(SshPublicKey, self).__init__(**kwargs)
        self.path = path
        self.key_data = key_data
class StorageProfile(msrest.serialization.Model):
    """VM storage settings: image reference, OS disk and data disks."""

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["OSDisk"] = None,
        data_disks: Optional[List["DataDisk"]] = None,
        **kwargs
    ):
        """
        :param image_reference: Optional source image reference.
        :param os_disk: Optional OS disk settings.
        :param data_disks: Optional data disk settings.
        """
        super(StorageProfile, self).__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class SubResourceReadOnly(msrest.serialization.Model):
    """Sub-resource reference whose ``id`` is server-populated (read-only)."""

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SubResourceReadOnly, self).__init__(**kwargs)
        self.id = None
class TargetRegion(msrest.serialization.Model):
    """Gallery replication target region.

    ``name`` is required; replica count and storage account type are optional.
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'regional_replica_count': {'key': 'regionalReplicaCount', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        regional_replica_count: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountType"]] = None,
        **kwargs
    ):
        """
        :param name: Required. Region name.
        :param regional_replica_count: Optional replicas in this region.
        :param storage_account_type: Optional storage type (string or StorageAccountType enum).
        """
        super(TargetRegion, self).__init__(**kwargs)
        self.name = name
        self.regional_replica_count = regional_replica_count
        self.storage_account_type = storage_account_type
class TerminateNotificationProfile(msrest.serialization.Model):
    """Terminate-event notification settings (both fields optional)."""

    _attribute_map = {
        'not_before_timeout': {'key': 'notBeforeTimeout', 'type': 'str'},
        'enable': {'key': 'enable', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        not_before_timeout: Optional[str] = None,
        enable: Optional[bool] = None,
        **kwargs
    ):
        """
        :param not_before_timeout: Optional delay before termination
            (string; presumably ISO-8601 duration -- confirm against service docs).
        :param enable: Optional flag enabling the terminate event.
        """
        super(TerminateNotificationProfile, self).__init__(**kwargs)
        self.not_before_timeout = not_before_timeout
        self.enable = enable
class ThrottledRequestsInput(LogAnalyticsInputBase):
    """Log Analytics request for throttled-requests data.

    Adds nothing to LogAnalyticsInputBase beyond restating its validation and
    attribute maps; everything is forwarded to ``super().__init__``.
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        **kwargs
    ):
        """
        :param blob_container_sas_uri: Required. SAS URI of the output blob container.
        :param from_time: Required. Start of the query window.
        :param to_time: Required. End of the query window.
        :param group_by_throttle_policy: Optional grouping flag.
        :param group_by_operation_name: Optional grouping flag.
        :param group_by_resource_name: Optional grouping flag.
        """
        super(ThrottledRequestsInput, self).__init__(blob_container_sas_uri=blob_container_sas_uri, from_time=from_time, to_time=to_time, group_by_throttle_policy=group_by_throttle_policy, group_by_operation_name=group_by_operation_name, group_by_resource_name=group_by_resource_name, **kwargs)
class UpgradeOperationHistoricalStatusInfo(msrest.serialization.Model):
    """Historical OS-upgrade operation record (all fields read-only)."""

    _validation = {
        'properties': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'UpgradeOperationHistoricalStatusInfoProperties'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UpgradeOperationHistoricalStatusInfo, self).__init__(**kwargs)
        self.properties = None
        self.type = None
        self.location = None
class UpgradeOperationHistoricalStatusInfoProperties(msrest.serialization.Model):
    """Detail payload of a historical upgrade record (all fields read-only)."""

    _validation = {
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
        'started_by': {'readonly': True},
        'target_image_reference': {'readonly': True},
        'rollback_info': {'readonly': True},
    }

    _attribute_map = {
        'running_status': {'key': 'runningStatus', 'type': 'UpgradeOperationHistoryStatus'},
        'progress': {'key': 'progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'error', 'type': 'ApiError'},
        'started_by': {'key': 'startedBy', 'type': 'str'},
        'target_image_reference': {'key': 'targetImageReference', 'type': 'ImageReference'},
        'rollback_info': {'key': 'rollbackInfo', 'type': 'RollbackStatusInfo'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UpgradeOperationHistoricalStatusInfoProperties, self).__init__(**kwargs)
        self.running_status = None
        self.progress = None
        self.error = None
        self.started_by = None
        self.target_image_reference = None
        self.rollback_info = None
class UpgradeOperationHistoryStatus(msrest.serialization.Model):
    """Start/end/status of one upgrade operation (all fields read-only)."""

    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UpgradeOperationHistoryStatus, self).__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.end_time = None
class UpgradePolicy(msrest.serialization.Model):
    """Scale-set upgrade policy: mode plus rolling/automatic sub-policies."""

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'},
        'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'},
    }

    def __init__(
        self,
        *,
        mode: Optional[Union[str, "UpgradeMode"]] = None,
        rolling_upgrade_policy: Optional["RollingUpgradePolicy"] = None,
        automatic_os_upgrade_policy: Optional["AutomaticOSUpgradePolicy"] = None,
        **kwargs
    ):
        """
        :param mode: Optional upgrade mode (string or UpgradeMode enum).
        :param rolling_upgrade_policy: Optional rolling-upgrade settings.
        :param automatic_os_upgrade_policy: Optional automatic-OS-upgrade settings.
        """
        super(UpgradePolicy, self).__init__(**kwargs)
        self.mode = mode
        self.rolling_upgrade_policy = rolling_upgrade_policy
        self.automatic_os_upgrade_policy = automatic_os_upgrade_policy
class Usage(msrest.serialization.Model):
    """Compute resource usage record.

    ``unit`` is a required *constant* field fixed at the class level to
    "Count" (see ``_validation`` and the ``unit`` class attribute); it is not
    an ``__init__`` parameter. The remaining fields are required.
    """

    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # Constant serialized value; msrest emits class-level constants directly.
    unit = "Count"

    def __init__(
        self,
        *,
        current_value: int,
        limit: int,
        name: "UsageName",
        **kwargs
    ):
        """
        :param current_value: Required. Current usage.
        :param limit: Required. Quota limit (64-bit on the wire).
        :param name: Required. Name of the counted resource.
        """
        super(Usage, self).__init__(**kwargs)
        self.current_value = current_value
        self.limit = limit
        self.name = name
class UsageName(msrest.serialization.Model):
    """Usage counter name: raw value plus localized display value."""

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[str] = None,
        localized_value: Optional[str] = None,
        **kwargs
    ):
        """
        :param value: Optional counter name.
        :param localized_value: Optional localized counter name.
        """
        super(UsageName, self).__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class UserArtifactManage(msrest.serialization.Model):
    """Gallery application management commands.

    ``install`` and ``remove`` are required; ``update`` is optional.
    """

    _validation = {
        'install': {'required': True},
        'remove': {'required': True},
    }

    _attribute_map = {
        'install': {'key': 'install', 'type': 'str'},
        'remove': {'key': 'remove', 'type': 'str'},
        'update': {'key': 'update', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        install: str,
        remove: str,
        update: Optional[str] = None,
        **kwargs
    ):
        """
        :param install: Required. Install command.
        :param remove: Required. Remove command.
        :param update: Optional update command.
        """
        super(UserArtifactManage, self).__init__(**kwargs)
        self.install = install
        self.remove = remove
        self.update = update
class UserArtifactSource(msrest.serialization.Model):
    """Source of a gallery application artifact.

    ``media_link`` is required; ``default_configuration_link`` is optional.
    """

    _validation = {
        'media_link': {'required': True},
    }

    _attribute_map = {
        'media_link': {'key': 'mediaLink', 'type': 'str'},
        'default_configuration_link': {'key': 'defaultConfigurationLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        media_link: str,
        default_configuration_link: Optional[str] = None,
        **kwargs
    ):
        """
        :param media_link: Required. Link to the artifact media.
        :param default_configuration_link: Optional default configuration link.
        """
        super(UserArtifactSource, self).__init__(**kwargs)
        self.media_link = media_link
        self.default_configuration_link = default_configuration_link
class UserAssignedIdentitiesValue(msrest.serialization.Model):
    """User-assigned identity detail: principal and client ids (read-only)."""

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(UserAssignedIdentitiesValue, self).__init__(**kwargs)
        self.principal_id = None
        self.client_id = None
class VaultCertificate(msrest.serialization.Model):
    """Key Vault certificate reference: URL plus target certificate store."""

    _attribute_map = {
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
        'certificate_store': {'key': 'certificateStore', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        certificate_url: Optional[str] = None,
        certificate_store: Optional[str] = None,
        **kwargs
    ):
        """
        :param certificate_url: Optional Key Vault secret URL of the certificate.
        :param certificate_store: Optional certificate store on the VM.
        """
        super(VaultCertificate, self).__init__(**kwargs)
        self.certificate_url = certificate_url
        self.certificate_store = certificate_store
class VaultSecretGroup(msrest.serialization.Model):
    """A source vault plus the certificates to install from it."""

    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
        'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
    }

    def __init__(
        self,
        *,
        source_vault: Optional["SubResource"] = None,
        vault_certificates: Optional[List["VaultCertificate"]] = None,
        **kwargs
    ):
        """
        :param source_vault: Optional vault containing the certificates.
        :param vault_certificates: Optional certificates to install.
        """
        super(VaultSecretGroup, self).__init__(**kwargs)
        self.source_vault = source_vault
        self.vault_certificates = vault_certificates
class VirtualHardDisk(msrest.serialization.Model):
    """Reference to a virtual hard disk by URI (optional)."""

    _attribute_map = {
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        uri: Optional[str] = None,
        **kwargs
    ):
        """
        :param uri: Optional VHD URI.
        """
        super(VirtualHardDisk, self).__init__(**kwargs)
        self.uri = uri
class VirtualMachine(Resource):
    """Tracked resource describing a virtual machine.

    Read-only fields (resources, provisioning_state, instance_view, vm_id,
    plus id/name/type from Resource) are server-populated and initialized to
    ``None``; all other attributes map 1:1 to keyword arguments.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'resources': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'vm_id': {'readonly': True},
    }

    # 'properties.*' keys are flattened from the nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'priority': {'key': 'properties.priority', 'type': 'str'},
        'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
        'host': {'key': 'properties.host', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        **kwargs
    ):
        """
        :param location: Required. Resource location; forwarded to Resource.
        :param tags: Optional resource tags; forwarded to Resource.

        All remaining keyword arguments are optional VM settings stored on
        the instance unchanged (see ``_attribute_map`` for their wire keys).
        """
        super(VirtualMachine, self).__init__(location=location, tags=tags, **kwargs)
        self.plan = plan
        self.resources = None
        self.identity = identity
        self.zones = zones
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.provisioning_state = None
        self.instance_view = None
        self.license_type = license_type
        self.vm_id = None
class VirtualMachineAgentInstanceView(msrest.serialization.Model):
    """Instance-view information about the VM agent running on a virtual machine.

    ``_attribute_map`` maps Python attribute names to wire-format keys for
    msrest (de)serialization. All constructor parameters are keyword-only.
    """

    _attribute_map = {
        'vm_agent_version': {'key': 'vmAgentVersion', 'type': 'str'},
        'extension_handlers': {'key': 'extensionHandlers', 'type': '[VirtualMachineExtensionHandlerInstanceView]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, vm_agent_version: Optional[str] = None,
                 extension_handlers: Optional[List["VirtualMachineExtensionHandlerInstanceView"]] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.vm_agent_version = vm_agent_version
        self.extension_handlers = extension_handlers
        self.statuses = statuses
class VirtualMachineCaptureParameters(msrest.serialization.Model):
    """Parameters for capturing a VM image: all three fields are required.

    ``_validation`` marks required fields; ``_attribute_map`` maps attribute
    names to wire-format keys for msrest (de)serialization.
    """

    _validation = {
        'vhd_prefix': {'required': True},
        'destination_container_name': {'required': True},
        'overwrite_vhds': {'required': True},
    }

    _attribute_map = {
        'vhd_prefix': {'key': 'vhdPrefix', 'type': 'str'},
        'destination_container_name': {'key': 'destinationContainerName', 'type': 'str'},
        'overwrite_vhds': {'key': 'overwriteVhds', 'type': 'bool'},
    }

    def __init__(self, *, vhd_prefix: str, destination_container_name: str,
                 overwrite_vhds: bool, **kwargs):
        super().__init__(**kwargs)
        self.vhd_prefix = vhd_prefix
        self.destination_container_name = destination_container_name
        self.overwrite_vhds = overwrite_vhds
class VirtualMachineCaptureResult(SubResource):
    """Result of a VM capture operation.

    Every field except the inherited ``id`` is read-only and populated by the
    service, so the constructor only initializes them to ``None``.
    """

    _validation = {
        'schema': {'readonly': True},
        'content_version': {'readonly': True},
        'parameters': {'readonly': True},
        'resources': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema': {'key': '$schema', 'type': 'str'},
        'content_version': {'key': 'contentVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '[object]'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        # Read-only fields; populated by the service on deserialization.
        self.schema = None
        self.content_version = None
        self.parameters = None
        self.resources = None
class VirtualMachineExtension(Resource):
    """A Virtual Machine Extension resource.

    ``location`` is required; ``provisioning_state`` is read-only and set by
    the service. All other fields are optional keyword arguments.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None,
                 force_update_tag: Optional[str] = None,
                 publisher: Optional[str] = None,
                 type_properties_type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 auto_upgrade_minor_version: Optional[bool] = None,
                 settings: Optional[object] = None,
                 protected_settings: Optional[object] = None,
                 instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
                 **kwargs):
        super().__init__(location=location, tags=tags, **kwargs)
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None  # read-only; populated by the service
        self.instance_view = instance_view
class VirtualMachineExtensionHandlerInstanceView(msrest.serialization.Model):
    """Instance-view information for a single VM extension handler."""

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, *, type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 status: Optional["InstanceViewStatus"] = None, **kwargs):
        super().__init__(**kwargs)
        self.type = type
        self.type_handler_version = type_handler_version
        self.status = status
class VirtualMachineExtensionImage(Resource):
    """A VM extension image resource; ``location`` is the only required field."""

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'operating_system': {'key': 'properties.operatingSystem', 'type': 'str'},
        'compute_role': {'key': 'properties.computeRole', 'type': 'str'},
        'handler_schema': {'key': 'properties.handlerSchema', 'type': 'str'},
        'vm_scale_set_enabled': {'key': 'properties.vmScaleSetEnabled', 'type': 'bool'},
        'supports_multiple_extensions': {'key': 'properties.supportsMultipleExtensions', 'type': 'bool'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None,
                 operating_system: Optional[str] = None,
                 compute_role: Optional[str] = None,
                 handler_schema: Optional[str] = None,
                 vm_scale_set_enabled: Optional[bool] = None,
                 supports_multiple_extensions: Optional[bool] = None, **kwargs):
        super().__init__(location=location, tags=tags, **kwargs)
        self.operating_system = operating_system
        self.compute_role = compute_role
        self.handler_schema = handler_schema
        self.vm_scale_set_enabled = vm_scale_set_enabled
        self.supports_multiple_extensions = supports_multiple_extensions
class VirtualMachineExtensionInstanceView(msrest.serialization.Model):
    """Instance view of a VM extension: identity plus status lists."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'substatuses': {'key': 'substatuses', 'type': '[InstanceViewStatus]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 substatuses: Optional[List["InstanceViewStatus"]] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.type = type
        self.type_handler_version = type_handler_version
        self.substatuses = substatuses
        self.statuses = statuses
class VirtualMachineExtensionsListResult(msrest.serialization.Model):
    """List-operation response holding VM extension resources."""

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineExtension]'},
    }

    def __init__(self, *, value: Optional[List["VirtualMachineExtension"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineExtensionUpdate(UpdateResource):
    """Update (PATCH) payload for a Virtual Machine Extension — all optional."""

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 force_update_tag: Optional[str] = None,
                 publisher: Optional[str] = None,
                 type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 auto_upgrade_minor_version: Optional[bool] = None,
                 settings: Optional[object] = None,
                 protected_settings: Optional[object] = None, **kwargs):
        super().__init__(tags=tags, **kwargs)
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type = type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
class VirtualMachineHealthStatus(msrest.serialization.Model):
    """Health status of a VM; the single field is read-only and service-set."""

    _validation = {
        'status': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service on deserialization.
        self.status = None
class VirtualMachineIdentity(msrest.serialization.Model):
    """Managed identity attached to a VM.

    ``principal_id`` and ``tenant_id`` are read-only and service-assigned.
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentitiesValue}'},
    }

    def __init__(self, *, type: Optional[Union[str, "ResourceIdentityType"]] = None,
                 user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.principal_id = None  # read-only
        self.tenant_id = None  # read-only
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineImageResource(SubResource):
    """A VM image resource reference; ``name`` and ``location`` are required."""

    _validation = {
        'name': {'required': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, name: str, location: str, id: Optional[str] = None,
                 tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        self.name = name
        self.location = location
        self.tags = tags
class VirtualMachineImage(VirtualMachineImageResource):
    """A full VM image description, extending the bare image resource."""

    _validation = {
        'name': {'required': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
        'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
        'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
        'automatic_os_upgrade_properties': {'key': 'properties.automaticOSUpgradeProperties', 'type': 'AutomaticOSUpgradeProperties'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(self, *, name: str, location: str, id: Optional[str] = None,
                 tags: Optional[Dict[str, str]] = None,
                 plan: Optional["PurchasePlan"] = None,
                 os_disk_image: Optional["OSDiskImage"] = None,
                 data_disk_images: Optional[List["DataDiskImage"]] = None,
                 automatic_os_upgrade_properties: Optional["AutomaticOSUpgradeProperties"] = None,
                 hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
                 **kwargs):
        super().__init__(id=id, name=name, location=location, tags=tags, **kwargs)
        self.plan = plan
        self.os_disk_image = os_disk_image
        self.data_disk_images = data_disk_images
        self.automatic_os_upgrade_properties = automatic_os_upgrade_properties
        self.hyper_v_generation = hyper_v_generation
class VirtualMachineInstanceView(msrest.serialization.Model):
    """Runtime instance view of a virtual machine — all fields optional."""

    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'os_name': {'key': 'osName', 'type': 'str'},
        'os_version': {'key': 'osVersion', 'type': 'str'},
        'hyper_v_generation': {'key': 'hyperVGeneration', 'type': 'str'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, platform_update_domain: Optional[int] = None,
                 platform_fault_domain: Optional[int] = None,
                 computer_name: Optional[str] = None,
                 os_name: Optional[str] = None,
                 os_version: Optional[str] = None,
                 hyper_v_generation: Optional[Union[str, "HyperVGenerationType"]] = None,
                 rdp_thumb_print: Optional[str] = None,
                 vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
                 maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
                 disks: Optional[List["DiskInstanceView"]] = None,
                 extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
                 boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.computer_name = computer_name
        self.os_name = os_name
        self.os_version = os_version
        self.hyper_v_generation = hyper_v_generation
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
class VirtualMachineListResult(msrest.serialization.Model):
    """Paged list of virtual machines; ``value`` is required, ``next_link``
    points at the next page when present."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachine]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachine"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineReimageParameters(msrest.serialization.Model):
    """Parameters for a VM reimage request (single optional flag)."""

    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        self.temp_disk = temp_disk
class VirtualMachineScaleSet(Resource):
    """A Virtual Machine Scale Set resource.

    ``location`` is required; ``provisioning_state`` and ``unique_id`` are
    read-only and populated by the service.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None,
                 sku: Optional["Sku"] = None,
                 plan: Optional["Plan"] = None,
                 identity: Optional["VirtualMachineScaleSetIdentity"] = None,
                 zones: Optional[List[str]] = None,
                 upgrade_policy: Optional["UpgradePolicy"] = None,
                 automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
                 virtual_machine_profile: Optional["VirtualMachineScaleSetVMProfile"] = None,
                 overprovision: Optional[bool] = None,
                 do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
                 single_placement_group: Optional[bool] = None,
                 zone_balance: Optional[bool] = None,
                 platform_fault_domain_count: Optional[int] = None,
                 proximity_placement_group: Optional["SubResource"] = None,
                 additional_capabilities: Optional["AdditionalCapabilities"] = None,
                 scale_in_policy: Optional["ScaleInPolicy"] = None, **kwargs):
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.provisioning_state = None  # read-only
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.unique_id = None  # read-only
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.proximity_placement_group = proximity_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
class VirtualMachineScaleSetDataDisk(msrest.serialization.Model):
    """A data disk in a scale set; ``lun`` and ``create_option`` are required."""

    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }

    def __init__(self, *, lun: int, create_option: Union[str, "DiskCreateOptionTypes"],
                 name: Optional[str] = None,
                 caching: Optional[Union[str, "CachingTypes"]] = None,
                 write_accelerator_enabled: Optional[bool] = None,
                 disk_size_gb: Optional[int] = None,
                 managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.lun = lun
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
class VirtualMachineScaleSetExtension(SubResourceReadOnly):
    """An extension attached to a scale set; ``provisioning_state`` is
    read-only and set by the service."""

    _validation = {
        'id': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'},
    }

    def __init__(self, *, name: Optional[str] = None,
                 force_update_tag: Optional[str] = None,
                 publisher: Optional[str] = None,
                 type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 auto_upgrade_minor_version: Optional[bool] = None,
                 settings: Optional[object] = None,
                 protected_settings: Optional[object] = None,
                 provision_after_extensions: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type = type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None  # read-only
        self.provision_after_extensions = provision_after_extensions
class VirtualMachineScaleSetExtensionListResult(msrest.serialization.Model):
    """Paged list of scale-set extensions; ``value`` is required."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetExtension]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetExtension"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetExtensionProfile(msrest.serialization.Model):
    """Wrapper holding the list of extensions for a scale set profile."""

    _attribute_map = {
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetExtension]'},
    }

    def __init__(self, *, extensions: Optional[List["VirtualMachineScaleSetExtension"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.extensions = extensions
class VirtualMachineScaleSetIdentity(msrest.serialization.Model):
    """Managed identity attached to a scale set.

    ``principal_id`` and ``tenant_id`` are read-only and service-assigned.
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue}'},
    }

    def __init__(self, *, type: Optional[Union[str, "ResourceIdentityType"]] = None,
                 user_assigned_identities: Optional[Dict[str, "VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.principal_id = None  # read-only
        self.tenant_id = None  # read-only
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue(msrest.serialization.Model):
    """User-assigned identity entry; both fields are read-only."""

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields; populated by the service on deserialization.
        self.principal_id = None
        self.client_id = None
class VirtualMachineScaleSetInstanceView(msrest.serialization.Model):
    """Instance view of a scale set; only ``statuses`` can be supplied,
    the other fields are read-only."""

    _validation = {
        'virtual_machine': {'readonly': True},
        'extensions': {'readonly': True},
    }

    _attribute_map = {
        'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, statuses: Optional[List["InstanceViewStatus"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.virtual_machine = None  # read-only
        self.extensions = None  # read-only
        self.statuses = statuses
class VirtualMachineScaleSetInstanceViewStatusesSummary(msrest.serialization.Model):
    """Summary of VM statuses in a scale set; the single field is read-only."""

    _validation = {
        'statuses_summary': {'readonly': True},
    }

    _attribute_map = {
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service on deserialization.
        self.statuses_summary = None
class VirtualMachineScaleSetIPConfiguration(SubResource):
    """IP configuration for a scale-set NIC; ``name`` is required."""

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetPublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(self, *, name: str, id: Optional[str] = None,
                 subnet: Optional["ApiEntityReference"] = None,
                 primary: Optional[bool] = None,
                 public_ip_address_configuration: Optional["VirtualMachineScaleSetPublicIPAddressConfiguration"] = None,
                 private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
                 application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
                 application_security_groups: Optional[List["SubResource"]] = None,
                 load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
                 load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
                 **kwargs):
        super().__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetIpTag(msrest.serialization.Model):
    """An IP tag (type/value pair) attached to a scale-set public IP."""

    _attribute_map = {
        'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None,
                 tag: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
class VirtualMachineScaleSetListOSUpgradeHistory(msrest.serialization.Model):
    """Paged list of OS-upgrade history entries; ``value`` is required."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[UpgradeOperationHistoricalStatusInfo]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["UpgradeOperationHistoricalStatusInfo"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListResult(msrest.serialization.Model):
    """Paged list of scale sets; ``value`` is required."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListSkusResult(msrest.serialization.Model):
    """Paged list of available scale-set SKUs; ``value`` is required."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetSku]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetSku"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListWithLinkResult(msrest.serialization.Model):
    """Paged list of scale sets with a continuation link; ``value`` is required."""

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"],
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetManagedDiskParameters(msrest.serialization.Model):
    """Managed-disk settings for a scale-set disk (storage account type)."""

    _attribute_map = {
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }

    def __init__(self, *, storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.storage_account_type = storage_account_type
class VirtualMachineScaleSetNetworkConfiguration(SubResource):
    """Network interface configuration for a scale set; ``name`` is required."""

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
    }

    def __init__(self, *, name: str, id: Optional[str] = None,
                 primary: Optional[bool] = None,
                 enable_accelerated_networking: Optional[bool] = None,
                 network_security_group: Optional["SubResource"] = None,
                 dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
                 ip_configurations: Optional[List["VirtualMachineScaleSetIPConfiguration"]] = None,
                 enable_ip_forwarding: Optional[bool] = None, **kwargs):
        super().__init__(id=id, **kwargs)
        self.name = name
        self.primary = primary
        self.enable_accelerated_networking = enable_accelerated_networking
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.enable_ip_forwarding = enable_ip_forwarding
class VirtualMachineScaleSetNetworkConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings (server list) for a scale-set network configuration."""

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(self, *, dns_servers: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineScaleSetNetworkProfile(msrest.serialization.Model):
    """Network profile for a scale set: health probe plus NIC configurations."""

    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
    }

    def __init__(self, *, health_probe: Optional["ApiEntityReference"] = None,
                 network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.health_probe = health_probe
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetOSDisk(msrest.serialization.Model):
    """Describes a virtual machine scale set operating system disk.

    All required parameters must be populated in order to send to Azure.

    :keyword create_option: Required. How the VM should be created from the
        disk (e.g. from an image or by attaching an existing disk).
    :keyword name: The disk name.
    :keyword caching: The caching requirements.
    :keyword write_accelerator_enabled: Whether writeAccelerator is enabled.
    :keyword diff_disk_settings: Ephemeral (diff) disk settings.
    :keyword disk_size_gb: Disk size in GB.
    :keyword os_type: OS type of the disk (Windows/Linux).
    :keyword image: Source user image virtual hard disk.
    :keyword vhd_containers: Container URLs used to store OS disk VHDs.
    :keyword managed_disk: The managed disk parameters.
    """
    # ``create_option`` is the only field the service requires.
    _validation = {
        'create_option': {'required': True},
    }
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }
    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetOSDisk, self).__init__(**kwargs)
        self.name = name
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.diff_disk_settings = diff_disk_settings
        self.disk_size_gb = disk_size_gb
        self.os_type = os_type
        self.image = image
        self.vhd_containers = vhd_containers
        self.managed_disk = managed_disk
class VirtualMachineScaleSetOSProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set OS profile.

    :keyword computer_name_prefix: Prefix used for the computer names of VMs
        in the scale set.
    :keyword admin_username: Administrator account user name.
    :keyword admin_password: Administrator account password.
    :keyword custom_data: Base-64 encoded custom data string.
    :keyword windows_configuration: Windows-specific OS settings.
    :keyword linux_configuration: Linux-specific OS settings.
    :keyword secrets: Certificates that should be installed onto the VMs.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'computer_name_prefix': {'key': 'computerNamePrefix', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }
    def __init__(
        self,
        *,
        computer_name_prefix: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetOSProfile, self).__init__(**kwargs)
        self.computer_name_prefix = computer_name_prefix
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
class VirtualMachineScaleSetPublicIPAddressConfiguration(msrest.serialization.Model):
    """Describes a scale-set IP configuration's public IP address configuration.

    All required parameters must be populated in order to send to Azure.

    :keyword name: Required. The public IP address configuration name.
    :keyword idle_timeout_in_minutes: Idle timeout of the public IP address.
    :keyword dns_settings: DNS settings applied to the public IP address.
    :keyword ip_tags: IP tags associated with the public IP address.
    :keyword public_ip_prefix: The public IP prefix to allocate from.
    """
    # ``name`` is the only field the service requires.
    _validation = {
        'name': {'required': True},
    }
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        name: str,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        ip_tags: Optional[List["VirtualMachineScaleSetIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
        self.ip_tags = ip_tags
        self.public_ip_prefix = public_ip_prefix
class VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings for a scale-set public IP address configuration.

    All required parameters must be populated in order to send to Azure.

    :keyword domain_name_label: Required. The domain name label; it forms the
        prefix of the generated PublicIPAddress resources.
    """
    # The service rejects a payload without ``domainNameLabel``.
    _validation = {
        'domain_name_label': {'required': True},
    }
    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }
    def __init__(self, *, domain_name_label: str, **kwargs):
        """Store the required domain name label."""
        super().__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineScaleSetReimageParameters(VirtualMachineReimageParameters):
    """Describes a virtual machine scale set VM reimage parameters.

    :keyword temp_disk: Whether to reimage the temp disk (forwarded to the
        VirtualMachineReimageParameters base class).
    :keyword instance_ids: The scale-set instance ids to reimage; from the
        base-class contract, omitting it targets all instances — TODO confirm
        against the service documentation.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        temp_disk: Optional[bool] = None,
        instance_ids: Optional[List[str]] = None,
        **kwargs
    ):
        """Forward ``temp_disk`` to the base class and keep ``instance_ids``."""
        super(VirtualMachineScaleSetReimageParameters, self).__init__(temp_disk=temp_disk, **kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetSku(msrest.serialization.Model):
    """Describes an available virtual machine scale set SKU.

    Variables are only populated by the server, and will be ignored when
    sending a request.
    """
    # Every field is read-only: the service fills them in on responses.
    _validation = {
        'resource_type': {'readonly': True},
        'sku': {'readonly': True},
        'capacity': {'readonly': True},
    }
    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'capacity': {'key': 'capacity', 'type': 'VirtualMachineScaleSetSkuCapacity'},
    }
    def __init__(self, **kwargs):
        """Create an empty instance; the deserializer populates the fields."""
        super().__init__(**kwargs)
        self.capacity = None
        self.sku = None
        self.resource_type = None
class VirtualMachineScaleSetSkuCapacity(msrest.serialization.Model):
    """Describes scaling information of a SKU.

    Variables are only populated by the server, and will be ignored when
    sending a request.
    """
    # Every field is read-only: the service fills them in on responses.
    _validation = {
        'minimum': {'readonly': True},
        'maximum': {'readonly': True},
        'default_capacity': {'readonly': True},
        'scale_type': {'readonly': True},
    }
    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'long'},
        'maximum': {'key': 'maximum', 'type': 'long'},
        'default_capacity': {'key': 'defaultCapacity', 'type': 'long'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        """Create an empty instance; the deserializer populates the fields."""
        super().__init__(**kwargs)
        self.scale_type = None
        self.default_capacity = None
        self.maximum = None
        self.minimum = None
class VirtualMachineScaleSetStorageProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set storage profile.

    :keyword image_reference: Information about the image to use.
    :keyword os_disk: The OS disk settings of the VMs in the scale set.
    :keyword data_disks: The data disk settings of the VMs in the scale set.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }
    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetStorageProfile, self).__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class VirtualMachineScaleSetUpdate(UpdateResource):
    """Describes a virtual machine scale set update (PATCH) payload.

    Every field is optional so that callers can patch only what changed;
    ``tags`` is forwarded to the UpdateResource base class, all other
    keywords map one-to-one onto attributes of the same name.
    """
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetUpdateVMProfile'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetUpdateVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """Forward ``tags`` to the base class and store the remaining fields."""
        super(VirtualMachineScaleSetUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.single_placement_group = single_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.proximity_placement_group = proximity_placement_group
class VirtualMachineScaleSetUpdateIPConfiguration(SubResource):
    """Describes a scale-set network profile's IP configuration for updates.

    Every field is optional so callers can patch only what changed; ``id``
    is forwarded to the SubResource base class, all other keywords map
    one-to-one onto attributes of the same name.
    """
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetUpdatePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        """Forward ``id`` to the base class and store the remaining fields."""
        super(VirtualMachineScaleSetUpdateIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetUpdateNetworkConfiguration(SubResource):
    """Describes a scale-set network profile's network configuration for updates.

    Every field is optional so callers can patch only what changed; ``id``
    is forwarded to the SubResource base class, all other keywords map
    one-to-one onto attributes of the same name.
    """
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetUpdateIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetUpdateIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        **kwargs
    ):
        """Forward ``id`` to the base class and store the remaining fields."""
        super(VirtualMachineScaleSetUpdateNetworkConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.primary = primary
        self.enable_accelerated_networking = enable_accelerated_networking
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.enable_ip_forwarding = enable_ip_forwarding
class VirtualMachineScaleSetUpdateNetworkProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set network profile for updates.

    :keyword health_probe: Reference to a load balancer probe used to
        determine the health of an instance in the scale set.
    :keyword network_interface_configurations: The list of network
        configurations.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetUpdateNetworkConfiguration]'},
    }
    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetUpdateNetworkConfiguration"]] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetUpdateNetworkProfile, self).__init__(**kwargs)
        self.health_probe = health_probe
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetUpdateOSDisk(msrest.serialization.Model):
    """Describes a scale-set operating system disk update payload.

    Unlike VirtualMachineScaleSetOSDisk, no field is required here, so
    callers can patch only what changed.

    :keyword caching: The caching requirements.
    :keyword write_accelerator_enabled: Whether writeAccelerator is enabled.
    :keyword disk_size_gb: Disk size in GB.
    :keyword image: Source user image virtual hard disk.
    :keyword vhd_containers: Container URLs used to store OS disk VHDs.
    :keyword managed_disk: The managed disk parameters.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
    }
    def __init__(
        self,
        *,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs)
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.disk_size_gb = disk_size_gb
        self.image = image
        self.vhd_containers = vhd_containers
        self.managed_disk = managed_disk
class VirtualMachineScaleSetUpdateOSProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set OS profile update payload.

    :keyword custom_data: Base-64 encoded custom data string.
    :keyword windows_configuration: Windows-specific OS settings.
    :keyword linux_configuration: Linux-specific OS settings.
    :keyword secrets: Certificates that should be installed onto the VMs.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }
    def __init__(
        self,
        *,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetUpdateOSProfile, self).__init__(**kwargs)
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
class VirtualMachineScaleSetUpdatePublicIPAddressConfiguration(msrest.serialization.Model):
    """Describes a scale-set public IP address configuration update payload.

    Unlike the create-time model, ``name`` is optional here so callers can
    patch only what changed.

    :keyword name: The public IP address configuration name.
    :keyword idle_timeout_in_minutes: Idle timeout of the public IP address.
    :keyword dns_settings: DNS settings applied to the public IP address.
    """
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetUpdatePublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
class VirtualMachineScaleSetUpdateStorageProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set storage profile update payload.

    :keyword image_reference: Information about the image to use.
    :keyword os_disk: The OS disk update settings.
    :keyword data_disks: The data disk settings.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetUpdateOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }
    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetUpdateOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineScaleSetUpdateStorageProfile, self).__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class VirtualMachineScaleSetUpdateVMProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set virtual machine profile update payload.

    Every field is optional so callers can patch only what changed; each
    keyword maps one-to-one onto the attribute of the same name.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetUpdateOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetUpdateStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetUpdateNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
    }
    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetUpdateOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetUpdateStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetUpdateNetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        **kwargs
    ):
        """Store every optional sub-profile as-is."""
        super(VirtualMachineScaleSetUpdateVMProfile, self).__init__(**kwargs)
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.license_type = license_type
        self.billing_profile = billing_profile
        self.scheduled_events_profile = scheduled_events_profile
class VirtualMachineScaleSetVM(Resource):
    """Describes a virtual machine scale set virtual machine.

    Variables marked ``readonly`` in ``_validation`` are only populated by
    the server and are initialized to None here; ``location`` is required.
    ``location`` and ``tags`` are forwarded to the Resource base class, all
    other writable keywords map one-to-one onto attributes of the same name.
    """
    # ``readonly`` fields are server-populated; ``location`` must be sent.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'instance_id': {'readonly': True},
        'sku': {'readonly': True},
        'resources': {'readonly': True},
        'zones': {'readonly': True},
        'latest_model_applied': {'readonly': True},
        'vm_id': {'readonly': True},
        'instance_view': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'model_definition_applied': {'readonly': True},
    }
    # Keys under ``properties.`` are nested inside the REST resource's
    # ``properties`` object on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineScaleSetVMInstanceView'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'network_profile_configuration': {'key': 'properties.networkProfileConfiguration', 'type': 'VirtualMachineScaleSetVMNetworkProfileConfiguration'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'model_definition_applied': {'key': 'properties.modelDefinitionApplied', 'type': 'str'},
        'protection_policy': {'key': 'properties.protectionPolicy', 'type': 'VirtualMachineScaleSetVMProtectionPolicy'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        network_profile_configuration: Optional["VirtualMachineScaleSetVMNetworkProfileConfiguration"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        protection_policy: Optional["VirtualMachineScaleSetVMProtectionPolicy"] = None,
        **kwargs
    ):
        """Store writable fields; read-only fields start as None."""
        super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags, **kwargs)
        # Server-populated (readonly) attributes are initialized to None.
        self.instance_id = None
        self.sku = None
        self.plan = plan
        self.resources = None
        self.zones = None
        self.latest_model_applied = None
        self.vm_id = None
        self.instance_view = None
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.network_profile_configuration = network_profile_configuration
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.provisioning_state = None
        self.license_type = license_type
        self.model_definition_applied = None
        self.protection_policy = protection_policy
class VirtualMachineScaleSetVMExtensionsSummary(msrest.serialization.Model):
    """Extensions summary for virtual machines of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when
    sending a request.
    """
    # Every field is read-only: the service fills them in on responses.
    _validation = {
        'name': {'readonly': True},
        'statuses_summary': {'readonly': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }
    def __init__(self, **kwargs):
        """Create an empty instance; the deserializer populates the fields."""
        super().__init__(**kwargs)
        self.statuses_summary = None
        self.name = None
class VirtualMachineScaleSetVMInstanceIDs(msrest.serialization.Model):
    """A list of virtual machine scale set instance IDs.

    :keyword instance_ids: The scale-set instance ids; optional.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }
    def __init__(self, *, instance_ids: Optional[List[str]] = None, **kwargs):
        """Store the optional instance-id list."""
        super().__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceRequiredIDs(msrest.serialization.Model):
    """A list of virtual machine scale set instance IDs that must be supplied.

    All required parameters must be populated in order to send to Azure.

    :keyword instance_ids: Required. The scale-set instance ids.
    """
    # The service rejects a payload without ``instanceIds``.
    _validation = {
        'instance_ids': {'required': True},
    }
    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }
    def __init__(self, *, instance_ids: List[str], **kwargs):
        """Store the mandatory instance-id list."""
        super().__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine scale set VM.

    ``vm_health`` is read-only (server-populated); every other keyword maps
    one-to-one onto the attribute of the same name.
    """
    # Only ``vm_health`` is server-populated.
    _validation = {
        'vm_health': {'readonly': True},
    }
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        placement_group_id: Optional[str] = None,
        **kwargs
    ):
        """Store writable fields; ``vm_health`` starts as None (read-only)."""
        super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        # Read-only; populated by the service on responses.
        self.vm_health = None
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
        self.placement_group_id = placement_group_id
class VirtualMachineScaleSetVMListResult(msrest.serialization.Model):
    """The list-VMs-in-a-scale-set operation response (one page).

    All required parameters must be populated in order to send to Azure.

    :keyword value: Required. The list of virtual machine scale set VMs.
    :keyword next_link: URI to fetch the next page of results, if any.
    """
    # ``value`` must always be present in a response page.
    _validation = {
        'value': {'required': True},
    }
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVM]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["VirtualMachineScaleSetVM"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Store the page contents and the optional continuation link."""
        super(VirtualMachineScaleSetVMListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetVMNetworkProfileConfiguration(msrest.serialization.Model):
    """Describes a virtual machine scale set VM network profile.

    :keyword network_interface_configurations: The list of network
        configurations.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
    }
    def __init__(
        self,
        *,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        **kwargs
    ):
        """Store the optional NIC configuration list."""
        super().__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetVMProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set virtual machine profile.

    Every field is optional; each keyword maps one-to-one onto the
    attribute of the same name.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'str'},
        'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
    }
    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetNetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        **kwargs
    ):
        """Store every optional sub-profile as-is."""
        super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.license_type = license_type
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.scheduled_events_profile = scheduled_events_profile
class VirtualMachineScaleSetVMProtectionPolicy(msrest.serialization.Model):
    """The protection policy of a virtual machine scale set VM.

    :keyword protect_from_scale_in: Whether the VM is protected from scale-in.
    :keyword protect_from_scale_set_actions: Whether the VM is protected from
        scale-set-level model changes/actions.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'protect_from_scale_in': {'key': 'protectFromScaleIn', 'type': 'bool'},
        'protect_from_scale_set_actions': {'key': 'protectFromScaleSetActions', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        protect_from_scale_in: Optional[bool] = None,
        protect_from_scale_set_actions: Optional[bool] = None,
        **kwargs
    ):
        """Store the two optional protection flags."""
        super().__init__(**kwargs)
        self.protect_from_scale_set_actions = protect_from_scale_set_actions
        self.protect_from_scale_in = protect_from_scale_in
class VirtualMachineScaleSetVMReimageParameters(VirtualMachineReimageParameters):
    """Describes a virtual machine scale set VM reimage parameters.

    :keyword temp_disk: Whether to reimage the temp disk (forwarded to the
        VirtualMachineReimageParameters base class).
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }
    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        """Forward everything to the base class; this type adds no fields."""
        super().__init__(temp_disk=temp_disk, **kwargs)
class VirtualMachineSize(msrest.serialization.Model):
    """Describes the properties of a VM size.

    :keyword name: The name of the virtual machine size.
    :keyword number_of_cores: The number of cores supported by the size.
    :keyword os_disk_size_in_mb: The OS disk size, in MB.
    :keyword resource_disk_size_in_mb: The resource (temp) disk size, in MB.
    :keyword memory_in_mb: The amount of memory, in MB.
    :keyword max_data_disk_count: The maximum number of data disks that can
        be attached.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'number_of_cores': {'key': 'numberOfCores', 'type': 'int'},
        'os_disk_size_in_mb': {'key': 'osDiskSizeInMB', 'type': 'int'},
        'resource_disk_size_in_mb': {'key': 'resourceDiskSizeInMB', 'type': 'int'},
        'memory_in_mb': {'key': 'memoryInMB', 'type': 'int'},
        'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'int'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        number_of_cores: Optional[int] = None,
        os_disk_size_in_mb: Optional[int] = None,
        resource_disk_size_in_mb: Optional[int] = None,
        memory_in_mb: Optional[int] = None,
        max_data_disk_count: Optional[int] = None,
        **kwargs
    ):
        """Each keyword maps one-to-one onto the attribute of the same name."""
        super(VirtualMachineSize, self).__init__(**kwargs)
        self.name = name
        self.number_of_cores = number_of_cores
        self.os_disk_size_in_mb = os_disk_size_in_mb
        self.resource_disk_size_in_mb = resource_disk_size_in_mb
        self.memory_in_mb = memory_in_mb
        self.max_data_disk_count = max_data_disk_count
class VirtualMachineSizeListResult(msrest.serialization.Model):
    """The list-virtual-machine-sizes operation response.

    :keyword value: The list of virtual machine sizes.
    """
    # Wire-name / msrest-type mapping used for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineSize]'},
    }
    def __init__(self, *, value: Optional[List["VirtualMachineSize"]] = None, **kwargs):
        """Store the optional size list."""
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineStatusCodeCount(msrest.serialization.Model):
    """The status code and count of a scale-set instance-view status summary.

    Variables are only populated by the server, and will be ignored when
    sending a request.
    """
    # Every field is read-only: the service fills them in on responses.
    _validation = {
        'code': {'readonly': True},
        'count': {'readonly': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
    }
    def __init__(self, **kwargs):
        """Create an empty instance; the deserializer populates the fields."""
        super().__init__(**kwargs)
        self.count = None
        self.code = None
class VirtualMachineUpdate(UpdateResource):
    """Update (PATCH) payload for a virtual machine.

    ``provisioning_state``, ``instance_view`` and ``vm_id`` are read-only
    (server-populated) and therefore always initialized to ``None``.
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'vm_id': {'readonly': True},
    }
    # Maps Python attribute names to their serialized wire key and type;
    # 'properties.*' keys are nested under the resource's properties object.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'priority': {'key': 'properties.priority', 'type': 'str'},
        'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
        'host': {'key': 'properties.host', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        **kwargs
    ):
        # tags are handled by the UpdateResource base class.
        super(VirtualMachineUpdate, self).__init__(tags=tags, **kwargs)
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        # provisioning_state / instance_view / vm_id are read-only: populated
        # by the service on deserialization, never sent by clients.
        self.provisioning_state = None
        self.instance_view = None
        self.license_type = license_type
        self.vm_id = None
class VMScaleSetConvertToSinglePlacementGroupInput(msrest.serialization.Model):
    """Input for converting a scale set to a single placement group.

    ``active_placement_group_id`` presumably names the placement group to
    keep active -- confirm against the Azure Compute REST API documentation.
    """

    # Maps Python attribute names to their serialized wire key and type.
    _attribute_map = {
        'active_placement_group_id': {'key': 'activePlacementGroupId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        active_placement_group_id: Optional[str] = None,
        **kwargs
    ):
        super(VMScaleSetConvertToSinglePlacementGroupInput, self).__init__(**kwargs)
        self.active_placement_group_id = active_placement_group_id
class WindowsConfiguration(msrest.serialization.Model):
    """Windows-specific operating-system settings for a virtual machine."""

    # Maps Python attribute names to their serialized wire key and type.
    _attribute_map = {
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
        'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
    }
    def __init__(
        self,
        *,
        provision_vm_agent: Optional[bool] = None,
        enable_automatic_updates: Optional[bool] = None,
        time_zone: Optional[str] = None,
        additional_unattend_content: Optional[List["AdditionalUnattendContent"]] = None,
        win_rm: Optional["WinRMConfiguration"] = None,
        **kwargs
    ):
        super(WindowsConfiguration, self).__init__(**kwargs)
        self.provision_vm_agent = provision_vm_agent
        self.enable_automatic_updates = enable_automatic_updates
        self.time_zone = time_zone
        self.additional_unattend_content = additional_unattend_content
        self.win_rm = win_rm
class WinRMConfiguration(msrest.serialization.Model):
    """Windows Remote Management configuration: a list of listeners."""

    # Maps Python attribute names to their serialized wire key and type.
    _attribute_map = {
        'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
    }
    def __init__(
        self,
        *,
        listeners: Optional[List["WinRMListener"]] = None,
        **kwargs
    ):
        super(WinRMConfiguration, self).__init__(**kwargs)
        self.listeners = listeners
class WinRMListener(msrest.serialization.Model):
    """A single WinRM listener: protocol plus an optional certificate URL."""

    # Maps Python attribute names to their serialized wire key and type.
    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        protocol: Optional[Union[str, "ProtocolTypes"]] = None,
        certificate_url: Optional[str] = None,
        **kwargs
    ):
        super(WinRMListener, self).__init__(**kwargs)
        self.protocol = protocol
        self.certificate_url = certificate_url
| true | true |
f732f0d839cc5d33925dcd7c1ee794b50b520202 | 576 | py | Python | utils/cython/setup.py | AsaphLightricks/3DDFA | 7630986c0286cd2c85b5dfd14ae6e8322e4ba605 | [
"MIT"
] | null | null | null | utils/cython/setup.py | AsaphLightricks/3DDFA | 7630986c0286cd2c85b5dfd14ae6e8322e4ba605 | [
"MIT"
] | null | null | null | utils/cython/setup.py | AsaphLightricks/3DDFA | 7630986c0286cd2c85b5dfd14ae6e8322e4ba605 | [
"MIT"
] | null | null | null | '''
python setup.py build_ext -i
to compile
'''
# setup.py
# Build the extension in place with: python setup.py build_ext -i
from distutils.core import setup, Extension
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools provides drop-in setup/Extension replacements.
# from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
setup(
    name='mesh_core_cython',
    # Use Cython's build_ext so .pyx sources are compiled at build time.
    cmdclass={'build_ext': build_ext},
    # One extension mixing a Cython wrapper (.pyx) with C++ sources; numpy
    # headers are on the include path, presumably because the wrapper uses
    # the numpy C API -- confirm in mesh_core_cython.pyx.
    ext_modules=[Extension("mesh_core_cython",
                           sources=["mesh_core_cython.pyx", "mesh_core.cpp"],
                           language='c++',
                           include_dirs=[numpy.get_include()], extra_compile_args=['-std=c++11', '-D_hypot=hypot', '-stdlib=libc++'])],
)
# Build the extension in place with: python setup.py build_ext -i
from distutils.core import setup, Extension
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools provides drop-in setup/Extension replacements.
from Cython.Distutils import build_ext
import numpy
setup(
    name='mesh_core_cython',
    # Use Cython's build_ext so .pyx sources are compiled at build time.
    cmdclass={'build_ext': build_ext},
    # One extension mixing a Cython wrapper (.pyx) with C++ sources; numpy
    # headers are included for the wrapper's (presumed) numpy C API usage.
    ext_modules=[Extension("mesh_core_cython",
                           sources=["mesh_core_cython.pyx", "mesh_core.cpp"],
                           language='c++',
                           include_dirs=[numpy.get_include()], extra_compile_args=['-std=c++11', '-D_hypot=hypot', '-stdlib=libc++'])],
)
f732f0dc65b3444ec138652f503105e575e280c7 | 1,563 | py | Python | saboteurs/tools.py | Edinburgh-Genome-Foundry/saboteurs | e421b241cd227537c78e6d6bff97fa8aa3f88219 | [
"MIT"
] | 6 | 2018-12-17T11:44:22.000Z | 2020-11-02T10:46:16.000Z | saboteurs/tools.py | Edinburgh-Genome-Foundry/saboteurs | e421b241cd227537c78e6d6bff97fa8aa3f88219 | [
"MIT"
] | null | null | null | saboteurs/tools.py | Edinburgh-Genome-Foundry/saboteurs | e421b241cd227537c78e6d6bff97fa8aa3f88219 | [
"MIT"
] | null | null | null | from collections import OrderedDict
def csv_to_groups_data(csv_path=None, csv_string=None):
    """Parse a groups CSV (from a path or a raw string) into group data.

    The CSV's first row is the header. Two layouts are supported:

    - "Logical" (header contains a ``result`` column): each row is
      ``name, result, member, member, ...``. Returns
      ``(groups, failed_groups)`` where ``groups`` is an ``OrderedDict``
      mapping group name to its member list and ``failed_groups`` lists the
      names whose result is not ``"success"``.
    - "Statistical" (no ``result`` column): each row is
      ``name, attempts, failures, member, member, ...``. Returns an
      ``OrderedDict`` mapping group name to a dict with keys ``id``,
      ``attempts``, ``failures`` and ``members``.
    """
    if csv_string is None:
        with open(csv_path, "r") as source:
            csv_string = source.read()
    # Split into non-empty rows of stripped, non-empty cells.
    rows = []
    for raw_line in csv_string.split("\n"):
        if not len(raw_line):
            continue
        stripped_cells = [cell.strip() for cell in raw_line.split(",")]
        rows.append([cell for cell in stripped_cells if len(cell)])
    header, records = rows[0], rows[1:]
    groups = OrderedDict()
    if "result" in header:
        # Logical datasheet: name, result, members...
        failed_groups = []
        for record in records:
            group_name = record[0]
            outcome = record[1]
            groups[group_name] = record[2:]
            if outcome != "success":
                failed_groups.append(group_name)
        return groups, failed_groups
    # Statistical datasheet: name, attempts, failures, members...
    for record in records:
        group_name = record[0]
        groups[group_name] = dict(
            id=group_name,
            attempts=int(record[1]),
            failures=int(record[2]),
            members=record[3:],
        )
    return groups
| 30.057692 | 88 | 0.550224 | from collections import OrderedDict
def csv_to_groups_data(csv_path=None, csv_string=None):
    """Read a groups CSV (path or raw string) into an ``OrderedDict``.

    If the header row contains a ``result`` column, returns
    ``(groups, failed_groups)`` where ``groups`` maps group name to member
    list and ``failed_groups`` lists names whose result is not ``success``.
    Otherwise each row is ``name, attempts, failures, members...`` and a
    single dict of dicts (keys ``id``/``attempts``/``failures``/``members``)
    is returned.
    """
    if csv_string is None:
        with open(csv_path, "r") as f:
            csv_string = f.read()
    # Split into non-empty rows of stripped, non-empty cells.
    lines = [
        [e.strip() for e in l.split(",") if len(e.strip())]
        for l in csv_string.split("\n")
        if len(l)
    ]
    groups = OrderedDict([])
    if "result" in lines[0]:
        # "Logical" datasheet: name, result, member...
        failed_groups = []
        for line in lines[1:]:
            name, result, members = line[0], line[1], line[2:]
            groups[name] = members
            if result != "success":
                failed_groups.append(name)
        return groups, failed_groups
    else:
        # "Statistical" datasheet: name, attempts, failures, member...
        for line in lines[1:]:
            (name, attempts, failures), members = line[:3], line[3:]
            groups[name] = dict(
                id=name, attempts=int(attempts), failures=int(failures), members=members
            )
        return groups
| true | true |
f732f1d978cb0ce632674ce18f5e5d8efdd69688 | 25,770 | py | Python | mne/io/pick.py | britta-wstnr/mne-python | b69afd1ff3337ac84f219b26c53537a5c8ceb1b9 | [
"BSD-3-Clause"
] | null | null | null | mne/io/pick.py | britta-wstnr/mne-python | b69afd1ff3337ac84f219b26c53537a5c8ceb1b9 | [
"BSD-3-Clause"
] | null | null | null | mne/io/pick.py | britta-wstnr/mne-python | b69afd1ff3337ac84f219b26c53537a5c8ceb1b9 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from copy import deepcopy
import re
import numpy as np
from .constants import FIFF
from ..utils import logger, verbose
from ..externals.six import string_types
def channel_type(info, idx):
    """Get channel type.

    Parameters
    ----------
    info : dict
        Measurement info
    idx : int
        Index of channel

    Returns
    -------
    type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg'
        'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst' | 'misc'
        'seeg' | 'bio' | 'chpi' | 'dipole' | 'gof' | 'ecog' | 'hbo' | 'hbr'
        Type of channel

    Raises
    ------
    Exception
        If the channel kind (or its unit / coil-type disambiguation) is
        not recognized.
    """
    kind = info['chs'][idx]['kind']
    if kind == FIFF.FIFFV_MEG_CH:
        # MEG is disambiguated by unit: T/m -> gradiometer, T -> magnetometer.
        if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:
            return 'grad'
        elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:
            return 'mag'
    elif kind == FIFF.FIFFV_REF_MEG_CH:
        return 'ref_meg'
    elif kind == FIFF.FIFFV_EEG_CH:
        return 'eeg'
    elif kind == FIFF.FIFFV_STIM_CH:
        return 'stim'
    elif kind == FIFF.FIFFV_EOG_CH:
        return 'eog'
    elif kind == FIFF.FIFFV_EMG_CH:
        return 'emg'
    elif kind == FIFF.FIFFV_ECG_CH:
        return 'ecg'
    elif kind == FIFF.FIFFV_RESP_CH:
        return 'resp'
    elif kind == FIFF.FIFFV_MISC_CH:
        return 'misc'
    elif kind == FIFF.FIFFV_EXCI_CH:
        return 'exci'
    elif kind == FIFF.FIFFV_IAS_CH:
        return 'ias'
    elif kind == FIFF.FIFFV_SYST_CH:
        return 'syst'
    elif kind == FIFF.FIFFV_SEEG_CH:
        return 'seeg'
    elif kind == FIFF.FIFFV_BIO_CH:
        return 'bio'
    elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
                  FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
                  FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
                  FIFF.FIFFV_HPI_MOV]:
        return 'chpi'  # channels relative to head position monitoring
    elif kind == FIFF.FIFFV_DIPOLE_WAVE:
        return 'dipole'
    elif kind == FIFF.FIFFV_GOODNESS_FIT:
        return 'gof'
    elif kind == FIFF.FIFFV_ECOG_CH:
        return 'ecog'
    elif kind == FIFF.FIFFV_FNIRS_CH:
        # fNIRS is disambiguated by coil type (oxy- vs deoxyhemoglobin).
        if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO:
            return 'hbo'
        elif info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR:
            return 'hbr'
    # Fall-through: unknown kind, or MEG/fNIRS with unrecognized unit/coil.
    raise Exception('Unknown channel type')
def pick_channels(ch_names, include, exclude=[]):
    """Return indices of the channels in ``ch_names`` to keep.

    Parameters
    ----------
    ch_names : list of string
        All candidate channel names. Must be unique; a duplicate raises
        ``RuntimeError`` because picking by name would be ambiguous.
    include : list of string
        Names to keep; an empty list keeps every available channel.
        Treated as a set -- its order does not influence the result.
    exclude : list of string
        Names to drop (applied after ``include``). Defaults to [].

    Returns
    -------
    sel : array of int
        Indices of good channels, in ``ch_names`` order.

    See Also
    --------
    pick_channels_regexp, pick_types
    """
    if len(np.unique(ch_names)) != len(ch_names):
        raise RuntimeError('ch_names is not a unique list, picking is unsafe')
    _check_excludes_includes(include)
    _check_excludes_includes(exclude)
    include = include if isinstance(include, set) else set(include)
    exclude = exclude if isinstance(exclude, set) else set(exclude)
    keep_everything = len(include) == 0
    sel = [index for index, name in enumerate(ch_names)
           if (keep_everything or name in include) and name not in exclude]
    return np.array(sel, int)
def pick_channels_regexp(ch_names, regexp):
    """Return indices of the channels whose name matches ``regexp``.

    The pattern is matched from the start of each name (``re.match``
    semantics).

    Parameters
    ----------
    ch_names : list of string
        Channel names to filter.
    regexp : string
        Regular expression (standard :mod:`re` syntax).

    Returns
    -------
    sel : list of int
        Indices of matching channels.

    See Also
    --------
    pick_channels

    Examples
    --------
    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
    [0]
    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')
    [0, 1, 2]
    """
    matcher = re.compile(regexp).match
    picks = []
    for index, channel_name in enumerate(ch_names):
        if matcher(channel_name):
            picks.append(index)
    return picks
def _triage_meg_pick(ch, meg):
    """Decide whether one MEG channel matches the requested ``meg`` value.

    ``meg`` is True (take everything) or one of 'grad'/'planar1'/'planar2'
    (gradiometers, unit T/m) or 'mag' (magnetometers, unit T). The planar
    variants additionally discriminate on the channel-name suffix.
    """
    if meg is True:
        return True
    if ch['unit'] == FIFF.FIFF_UNIT_T_M:
        # Gradiometer family.
        if meg == 'grad':
            return True
        if meg == 'planar1' and ch['ch_name'].endswith('2'):
            return True
        if meg == 'planar2' and ch['ch_name'].endswith('3'):
            return True
        return False
    return meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T
def _triage_fnirs_pick(ch, fnirs):
    """Decide whether one fNIRS channel matches the requested ``fnirs`` value.

    ``fnirs`` is True (take everything), 'hbo' (oxyhemoglobin coils) or
    'hbr' (deoxyhemoglobin coils); anything else never matches.
    """
    if fnirs is True:
        return True
    coil = ch['coil_type']
    if fnirs == 'hbo':
        return coil == FIFF.FIFFV_COIL_FNIRS_HBO
    if fnirs == 'hbr':
        return coil == FIFF.FIFFV_COIL_FNIRS_HBR
    return False
def _check_meg_type(meg, allow_auto=False):
    """Raise ValueError when ``meg`` is a string outside the allowed set.

    Non-string values (e.g. booleans) pass through unchecked; 'auto' is
    accepted only when ``allow_auto`` is truthy.
    """
    if not isinstance(meg, string_types):
        return
    allowed_types = ['grad', 'mag', 'planar1', 'planar2']
    if allow_auto:
        allowed_types += ['auto']
    if meg not in allowed_types:
        raise ValueError('meg value must be one of %s or bool, not %s'
                         % (allowed_types, meg))
def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
               emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
               exci=False, ias=False, syst=False, seeg=False, dipole=False,
               gof=False, bio=False, ecog=False, fnirs=False, include=(),
               exclude='bads', selection=None):
    """Pick channels by type and names.

    Parameters
    ----------
    info : dict
        The measurement info.
    meg : bool | str
        If True include all MEG channels. If False include None
        If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
        only magnetometers, all gradiometers, or a specific type of
        gradiometer.
    eeg : bool
        If True include EEG channels.
    stim : bool
        If True include stimulus channels.
    eog : bool
        If True include EOG channels.
    ecg : bool
        If True include ECG channels.
    emg : bool
        If True include EMG channels.
    ref_meg: bool | str
        If True include CTF / 4D reference channels. If 'auto', the reference
        channels are only included if compensations are present. Can also be
        the string options from `meg`.
    misc : bool
        If True include miscellaneous analog channels.
    resp : bool
        If True include response-trigger channel. For some MEG systems this
        is separate from the stim channel.
    chpi : bool
        If True include continuous HPI coil channels.
    exci : bool
        Flux excitation channel used to be a stimulus channel.
    ias : bool
        Internal Active Shielding data (maybe on Triux only).
    syst : bool
        System status channel information (on Triux systems only).
    seeg : bool
        Stereotactic EEG channels.
    dipole : bool
        Dipole time course channels.
    gof : bool
        Dipole goodness of fit channels.
    bio : bool
        Bio channels.
    ecog : bool
        Electrocorticography channels.
    fnirs : bool | str
        Functional near-infrared spectroscopy channels. If True include all
        fNIRS channels. If False (default) include none. If string it can be
        'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
        include channels measuring deoxyhemoglobin).
    include : list of string
        List of additional channels to include. If empty do not include any.
    exclude : list of string | str
        List of channels to exclude. If 'bads' (default), exclude channels
        in ``info['bads']``.
    selection : list of string
        Restrict sensor channels (MEG, EEG) to this list of channel names.

    Returns
    -------
    sel : array of int
        Indices of good channels.
    """
    # NOTE: Changes to this function's signature should also be changed in
    # PickChannelsMixin
    from .meas_info import Info
    if not isinstance(info, Info):
        raise TypeError('info must be an instance of Info, not %s'
                        % type(info))
    info._check_consistency()
    nchan = info['nchan']
    # FIX: dtype=bool instead of the deprecated np.bool alias (removed in
    # NumPy 1.24); np.bool was just an alias of the builtin, so behavior is
    # identical on all NumPy versions.
    pick = np.zeros(nchan, dtype=bool)
    if exclude is None:
        raise ValueError('exclude must be a list of strings or "bads"')
    elif exclude == 'bads':
        exclude = info.get('bads', [])
    elif not isinstance(exclude, (list, tuple)):
        raise ValueError('exclude must either be "bads" or a list of strings.'
                         ' If only one channel is to be excluded, use '
                         '[ch_name] instead of passing ch_name.')
    _check_meg_type(ref_meg, allow_auto=True)
    _check_meg_type(meg)
    # 'auto' means: take reference channels only when compensation matrices
    # are present in the info.
    if isinstance(ref_meg, string_types) and ref_meg == 'auto':
        ref_meg = ('comps' in info and info['comps'] is not None and
                   len(info['comps']) > 0)
    for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci,
                  ias, syst, seeg, dipole, gof, bio, ecog):
        if not isinstance(param, bool):
            w = ('Parameters for all channel types (with the exception '
                 'of "meg", "ref_meg" and "fnirs") must be of type bool, '
                 'not {0}.')
            raise ValueError(w.format(type(param)))
    for k in range(nchan):
        kind = info['chs'][k]['kind']
        # XXX eventually we should de-duplicate this with channel_type!
        if kind == FIFF.FIFFV_MEG_CH and meg:
            pick[k] = _triage_meg_pick(info['chs'][k], meg)
        elif kind == FIFF.FIFFV_EEG_CH and eeg:
            pick[k] = True
        elif kind == FIFF.FIFFV_STIM_CH and stim:
            pick[k] = True
        elif kind == FIFF.FIFFV_EOG_CH and eog:
            pick[k] = True
        elif kind == FIFF.FIFFV_ECG_CH and ecg:
            pick[k] = True
        elif kind == FIFF.FIFFV_EMG_CH and emg:
            pick[k] = True
        elif kind == FIFF.FIFFV_MISC_CH and misc:
            pick[k] = True
        elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:
            pick[k] = _triage_meg_pick(info['chs'][k], ref_meg)
        elif kind == FIFF.FIFFV_RESP_CH and resp:
            pick[k] = True
        elif kind == FIFF.FIFFV_SYST_CH and syst:
            pick[k] = True
        elif kind == FIFF.FIFFV_SEEG_CH and seeg:
            pick[k] = True
        elif kind == FIFF.FIFFV_IAS_CH and ias:
            pick[k] = True
        elif kind == FIFF.FIFFV_EXCI_CH and exci:
            pick[k] = True
        elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
                      FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
                      FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
                      FIFF.FIFFV_HPI_MOV] and chpi:
            pick[k] = True
        elif kind == FIFF.FIFFV_DIPOLE_WAVE and dipole:
            pick[k] = True
        elif kind == FIFF.FIFFV_GOODNESS_FIT and gof:
            pick[k] = True
        elif kind == FIFF.FIFFV_BIO_CH and bio:
            pick[k] = True
        elif kind == FIFF.FIFFV_ECOG_CH and ecog:
            pick[k] = True
        elif kind == FIFF.FIFFV_FNIRS_CH:
            pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs)
    # restrict channels to selection if provided
    if selection is not None:
        # the selection only restricts these types of channels
        sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
                    FIFF.FIFFV_EEG_CH]
        for k in np.where(pick)[0]:
            if (info['chs'][k]['kind'] in sel_kind and
                    info['ch_names'][k] not in selection):
                pick[k] = False
    # Delegate the final include/exclude-by-name filtering to pick_channels.
    myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
    myinclude += include
    if len(myinclude) == 0:
        sel = np.array([], int)
    else:
        sel = pick_channels(info['ch_names'], myinclude, exclude)
    return sel
def pick_info(info, sel=(), copy=True):
    """Restrict an info structure to a selection of channels.

    Parameters
    ----------
    info : dict
        Info structure from evoked or raw data.
    sel : list of int | None
        Indices of channels to include. ``None`` keeps everything; an empty
        selection is an error.
    copy : bool
        If copy is False, info is modified inplace.

    Returns
    -------
    res : dict
        Info structure restricted to a selection of channels.
    """
    info._check_consistency()
    info = info.copy() if copy else info
    if sel is None:
        # None means "keep everything" (contrast with [], which raises).
        return info
    elif len(sel) == 0:
        raise ValueError('No channels match the selection.')
    info['chs'] = [info['chs'][k] for k in sel]
    info._update_redundant()
    info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
    # Restrict each compensation matrix to the rows whose channels survive.
    comps = deepcopy(info['comps'])
    for c in comps:
        row_idx = [k for k, n in enumerate(c['data']['row_names'])
                   if n in info['ch_names']]
        row_names = [c['data']['row_names'][i] for i in row_idx]
        rowcals = c['rowcals'][row_idx]
        c['rowcals'] = rowcals
        c['data']['nrow'] = len(row_names)
        c['data']['row_names'] = row_names
        c['data']['data'] = c['data']['data'][row_idx]
    info['comps'] = comps
    info._check_consistency()
    return info
def _has_kit_refs(info, picks):
    """Return True when any picked channel is a KIT reference magnetometer.

    Used as a guard by make_forward_solution, which cannot run with KIT
    reference channels included.
    """
    return any(info['chs'][pick]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG
               for pick in picks)
def pick_channels_evoked(orig, include=[], exclude='bads'):
    """Restrict an Evoked dataset to a subset of its channels.

    Parameters
    ----------
    orig : Evoked object
        One evoked dataset.
    include : list of string, (optional)
        Channels to keep; empty keeps all available channels.
    exclude : list of string | str
        Channels to drop; the string 'bads' (default) drops
        ``orig.info['bads']``, and an empty list drops nothing.

    Returns
    -------
    res : instance of Evoked
        A deep copy restricted to the selected channels -- except when both
        ``include`` and ``exclude`` are empty, in which case ``orig`` itself
        is returned without copying.
    """
    if len(include) == 0 and len(exclude) == 0:
        return orig
    exclude = _check_excludes_includes(exclude, info=orig.info,
                                       allow_bads=True)
    picks = pick_channels(orig.info['ch_names'], include=include,
                          exclude=exclude)
    if len(picks) == 0:
        raise ValueError('Warning : No channels match the selection.')
    picked = deepcopy(orig)
    # Trim the measurement info first, then the data rows to match it.
    picked.info = pick_info(picked.info, picks)
    picked.data = picked.data[picks, :]
    return picked
@verbose
def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
    """Pick channels from forward operator.

    Parameters
    ----------
    orig : dict
        A forward solution.
    include : list of string
        List of channels to include (if empty, include all available).
        Defaults to [].
    exclude : list of string | 'bads'
        Channels to exclude (if empty, do not exclude any). Defaults to [].
        If 'bads', then exclude bad channels in orig.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    res : dict
        Forward solution restricted to selected channels. If include and
        exclude are empty it returns orig without copy.
    """
    orig['info']._check_consistency()
    if len(include) == 0 and len(exclude) == 0:
        return orig
    exclude = _check_excludes_includes(exclude,
                                       info=orig['info'], allow_bads=True)
    # Allow for possibility of channel ordering in forward solution being
    # different from that of the M/EEG file it is based on.
    sel_sol = pick_channels(orig['sol']['row_names'], include=include,
                            exclude=exclude)
    sel_info = pick_channels(orig['info']['ch_names'], include=include,
                             exclude=exclude)
    fwd = deepcopy(orig)
    # Check that forward solution and original data file agree on #channels
    if len(sel_sol) != len(sel_info):
        raise ValueError('Forward solution and functional data appear to '
                         'have different channel names, please check.')
    # Do we have something?
    nuse = len(sel_sol)
    if nuse == 0:
        raise ValueError('Nothing remains after picking')
    logger.info('    %d out of %d channels remain after picking'
                % (nuse, fwd['nchan']))
    # Pick the correct rows of the forward operator using sel_sol
    fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
    fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
    fwd['sol']['nrow'] = nuse
    ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
    fwd['nchan'] = nuse
    fwd['sol']['row_names'] = ch_names
    # Pick the appropriate channel names from the info-dict using sel_info
    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
    fwd['info']._update_redundant()
    fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
    # The gradient solution, when present, must be trimmed identically.
    if fwd['sol_grad'] is not None:
        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
        fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
        fwd['sol_grad']['nrow'] = nuse
        fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
                                        for k in sel_sol]
    return fwd
def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,
                       ecog=False, include=[], exclude=[]):
    """Restrict a forward operator to channels of the requested types.

    Parameters
    ----------
    orig : dict
        A forward solution.
    meg : bool or string
        True for all MEG channels, False for none, or 'mag'/'grad' for
        magnetometers/gradiometers only.
    eeg : bool
        Whether to keep EEG channels.
    ref_meg : bool
        Whether to keep CTF / 4D reference channels.
    seeg : bool
        Whether to keep stereotactic EEG channels.
    ecog : bool
        Whether to keep electrocorticography channels.
    include : list of string
        Extra channel names to force in; empty forces none.
    exclude : list of string | str
        Channel names to drop; 'bads' drops ``orig['info']['bads']``.

    Returns
    -------
    res : dict
        Forward solution restricted to the selected channel types; raises
        ValueError when nothing matches.
    """
    info = orig['info']
    picks = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog,
                       include=include, exclude=exclude)
    if len(picks) == 0:
        raise ValueError('No valid channels found')
    picked_names = [info['ch_names'][idx] for idx in picks]
    return pick_channels_forward(orig, picked_names)
def channel_indices_by_type(info):
    """Get indices of channels by type.

    Returns a dict mapping each pickable channel type -- with 'meg' split
    into 'mag'/'grad' and 'fnirs' into 'hbo'/'hbr' -- to the list of channel
    indices of that type; types with no channels map to empty lists.
    """
    idx = dict((key, list()) for key in _PICK_TYPES_KEYS if
               key not in ('meg', 'fnirs'))
    idx.update(mag=list(), grad=list(), hbo=list(), hbr=list())
    for k in range(len(info['chs'])):
        # Classify each channel exactly once; the original inner loop called
        # channel_type(info, k) again for every candidate key (~20x/channel).
        this_type = channel_type(info, k)
        if this_type in idx:
            idx[this_type].append(k)
    return idx
def pick_channels_cov(orig, include=[], exclude='bads'):
    """Pick channels from covariance matrix.

    Parameters
    ----------
    orig : Covariance
        A covariance.
    include : list of string, (optional)
        List of channels to include (if empty, include all available).
    exclude : list of string, (optional) | 'bads'
        Channels to exclude (if empty, do not exclude any). Defaults to 'bads'.

    Returns
    -------
    res : dict
        Covariance solution restricted to selected channels.
    """
    from ..cov import Covariance
    exclude = orig['bads'] if exclude == 'bads' else exclude
    sel = pick_channels(orig['names'], include=include, exclude=exclude)
    # Diagonal covariances store a vector; full ones store a matrix.
    data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel]
    names = [orig['names'][k] for k in sel]
    # NOTE(review): bads are filtered against the *original* channel list,
    # not the picked subset -- verify this is intended.
    bads = [name for name in orig['bads'] if name in orig['names']]
    # Eigendecomposition is invalidated by picking, so eig/eigvec reset.
    res = Covariance(
        data=data, names=names, bads=bads, projs=deepcopy(orig['projs']),
        nfree=orig['nfree'], eig=None, eigvec=None,
        method=orig.get('method', None), loglik=orig.get('loglik', None))
    return res
def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'):
    """Return ``[(channel_type, picks), ...]`` for the channels in ``info``.

    Parameters
    ----------
    info : instance of mne.measuerment_info.Info
        The info.
    meg_combined : bool
        When True and both magnetometers and gradiometers are present,
        a single combined ('meg', picks) entry replaces the separate
        'mag'/'grad' entries.
    ref_meg : bool
        Whether to include CTF / 4D reference channels.
    exclude : list of string | str
        Channels to drop; 'bads' (default) drops ``info['bads']``.

    Returns
    -------
    picks_list : list of tuples
        Entries in mag/grad (or meg), then eeg order.
    """
    from ..channels.channels import _contains_ch_type

    def _pick(meg_value, eeg_value):
        # All entries share the same stim/ref_meg/exclude settings.
        return pick_types(info, meg=meg_value, eeg=eeg_value, stim=False,
                          ref_meg=ref_meg, exclude=exclude)

    has_mag = _contains_ch_type(info, 'mag')
    has_grad = _contains_ch_type(info, 'grad')
    has_eeg = _contains_ch_type(info, 'eeg')
    picks_list = []
    if has_mag and (meg_combined is not True or not has_grad):
        picks_list.append(('mag', _pick('mag', False)))
    if has_grad and (meg_combined is not True or not has_mag):
        picks_list.append(('grad', _pick('grad', False)))
    if has_mag and has_grad and meg_combined is True:
        picks_list.append(('meg', _pick(True, False)))
    if has_eeg:
        picks_list.append(('eeg', _pick(False, True)))
    return picks_list
def _check_excludes_includes(chs, info=None, allow_bads=False):
    """Validate an include/exclude argument, resolving 'bads' when allowed.

    Parameters
    ----------
    chs : any input, should be list, tuple, string
        The channels passed to include or exclude.
    info : Info | None
        Required (and checked) only when resolving the string 'bads'.
    allow_bads : bool
        Allow the user to supply "bads" as a string for auto exclusion.

    Returns
    -------
    chs : list
        ``chs`` unchanged when list-like; otherwise ``info['bads']`` when
        ``allow_bads`` and ``chs == 'bads'``. Any other value raises
        ValueError.
    """
    from .meas_info import Info
    if isinstance(chs, (list, tuple, np.ndarray)):
        return chs
    if allow_bads is not True:
        raise ValueError(
            'include/exclude must be list, tuple, ndarray, or "bads". ' +
            'You provided type {0}'.format(type(chs)))
    if not isinstance(info, Info):
        raise ValueError('Supply an info object if allow_bads is true')
    if chs != 'bads':
        raise ValueError('If chs is a string, it must be "bads"')
    return info['bads']
# Channel-type -> "is a data channel" flags, expanded via **kwargs into
# pick_types by _pick_data_channels.
_PICK_TYPES_DATA_DICT = dict(
    meg=True, eeg=True, stim=False, eog=False, ecg=False, emg=False,
    misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False,
    seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True)
# Every keyword name pick_types accepts for a channel type ('ref_meg' extra).
_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT.keys()) + ['ref_meg'])
# Data channel types with 'meg' split into mag/grad and 'fnirs' into hbo/hbr.
_DATA_CH_TYPES_SPLIT = ['mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr']
# Valid data types, ordered for consistency, used in viz/evoked.
_VALID_CHANNEL_TYPES = ['eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg',
                        'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr',
                        'misc']
def _pick_data_channels(info, exclude='bads', with_ref_meg=True):
    """Select every data-carrying channel (per _PICK_TYPES_DATA_DICT)."""
    type_flags = dict(_PICK_TYPES_DATA_DICT)
    return pick_types(info, ref_meg=with_ref_meg, include=[],
                      exclude=exclude, selection=None, **type_flags)
def _pick_aux_channels(info, exclude='bads'):
    """Select only auxiliary channels: EOG, ECG, EMG and BIO."""
    aux_flags = dict(eog=True, ecg=True, emg=True, bio=True)
    return pick_types(info, meg=False, ref_meg=False, exclude=exclude,
                      **aux_flags)
def _pick_data_or_ica(info):
    """Pick ICA source channels when present, otherwise data channels.

    Channels named with an 'ICA ' substring (stored as misc) mark
    ICA-source data.
    """
    names = [entry['ch_name'] for entry in info['chs']]
    if any('ICA ' in name for name in names):
        return pick_types(info, exclude=[], misc=True)
    return _pick_data_channels(info, exclude=[], with_ref_meg=True)
| 34.683715 | 79 | 0.60066 |
from copy import deepcopy
import re
import numpy as np
from .constants import FIFF
from ..utils import logger, verbose
from ..externals.six import string_types
def channel_type(info, idx):
    """Return the channel type string for channel ``idx`` in ``info``.

    MEG channels are split by unit (FIFF_UNIT_T_M -> 'grad',
    FIFF_UNIT_T -> 'mag') and fNIRS channels by coil type
    ('hbo'/'hbr'); every other recognized FIFF kind maps to a single
    type name.  Raises Exception for an unknown kind (or an unknown
    MEG unit / fNIRS coil type).
    """
    kind = info['chs'][idx]['kind']
    if kind == FIFF.FIFFV_MEG_CH:
        # MEG is disambiguated by unit: T/m -> gradiometer, T -> magnetometer.
        if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:
            return 'grad'
        elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:
            return 'mag'
    elif kind == FIFF.FIFFV_REF_MEG_CH:
        return 'ref_meg'
    elif kind == FIFF.FIFFV_EEG_CH:
        return 'eeg'
    elif kind == FIFF.FIFFV_STIM_CH:
        return 'stim'
    elif kind == FIFF.FIFFV_EOG_CH:
        return 'eog'
    elif kind == FIFF.FIFFV_EMG_CH:
        return 'emg'
    elif kind == FIFF.FIFFV_ECG_CH:
        return 'ecg'
    elif kind == FIFF.FIFFV_RESP_CH:
        return 'resp'
    elif kind == FIFF.FIFFV_MISC_CH:
        return 'misc'
    elif kind == FIFF.FIFFV_EXCI_CH:
        return 'exci'
    elif kind == FIFF.FIFFV_IAS_CH:
        return 'ias'
    elif kind == FIFF.FIFFV_SYST_CH:
        return 'syst'
    elif kind == FIFF.FIFFV_SEEG_CH:
        return 'seeg'
    elif kind == FIFF.FIFFV_BIO_CH:
        return 'bio'
    # All quaternion / HPI bookkeeping kinds collapse to 'chpi'.
    elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
                  FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
                  FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
                  FIFF.FIFFV_HPI_MOV]:
        return 'chpi'
    elif kind == FIFF.FIFFV_DIPOLE_WAVE:
        return 'dipole'
    elif kind == FIFF.FIFFV_GOODNESS_FIT:
        return 'gof'
    elif kind == FIFF.FIFFV_ECOG_CH:
        return 'ecog'
    elif kind == FIFF.FIFFV_FNIRS_CH:
        # fNIRS is disambiguated by coil type.
        if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO:
            return 'hbo'
        elif info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR:
            return 'hbr'
    # Any kind (or MEG unit / fNIRS coil type) not handled above ends here.
    raise Exception('Unknown channel type')
def pick_channels(ch_names, include, exclude=[]):
    """Return indices of ``ch_names`` kept by ``include`` minus ``exclude``.

    An empty ``include`` means "keep every channel not excluded".
    Raises RuntimeError when ``ch_names`` contains duplicates.
    """
    if len(np.unique(ch_names)) != len(ch_names):
        raise RuntimeError('ch_names is not a unique list, picking is unsafe')
    _check_excludes_includes(include)
    _check_excludes_includes(exclude)
    include = include if isinstance(include, set) else set(include)
    exclude = exclude if isinstance(exclude, set) else set(exclude)
    keep_all = len(include) == 0
    sel = [idx for idx, name in enumerate(ch_names)
           if (keep_all or name in include) and name not in exclude]
    return np.array(sel, int)
def pick_channels_regexp(ch_names, regexp):
    """Return indices of channels whose name matches ``regexp``.

    Matching is anchored at the start of the name (``re.match``).
    """
    pattern = re.compile(regexp)
    return [idx for idx, ch_name in enumerate(ch_names)
            if pattern.match(ch_name) is not None]
def _triage_meg_pick(ch, meg):
if meg is True:
return True
elif ch['unit'] == FIFF.FIFF_UNIT_T_M:
if meg == 'grad':
return True
elif meg == 'planar1' and ch['ch_name'].endswith('2'):
return True
elif meg == 'planar2' and ch['ch_name'].endswith('3'):
return True
elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T):
return True
return False
def _triage_fnirs_pick(ch, fnirs):
if fnirs is True:
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and fnirs == 'hbo':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and fnirs == 'hbr':
return True
return False
def _check_meg_type(meg, allow_auto=False):
    """Raise ValueError if a string ``meg`` is outside the allowed set."""
    if not isinstance(meg, string_types):
        return  # bools are always acceptable
    allowed_types = ['grad', 'mag', 'planar1', 'planar2']
    if allow_auto:
        allowed_types.append('auto')
    if meg not in allowed_types:
        raise ValueError('meg value must be one of %s or bool, not %s'
                         % (allowed_types, meg))
def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
               emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
               exci=False, ias=False, syst=False, seeg=False, dipole=False,
               gof=False, bio=False, ecog=False, fnirs=False, include=(),
               exclude='bads', selection=None):
    """Return an array of channel indices matching the requested types.

    Each keyword selects one channel type; ``meg``/``ref_meg`` may also
    be a subtype string ('grad', 'mag', 'planar1', 'planar2') and
    ``fnirs`` may be 'hbo'/'hbr'.  ``ref_meg='auto'`` picks reference
    MEG channels only when compensation matrices are present in info.
    ``include`` forces names in; ``exclude`` (default 'bads') forces
    names out; ``selection`` restricts MEG/EEG channels to a name list.
    Raises TypeError for a non-Info ``info`` and ValueError for invalid
    type flags or ``exclude`` values.
    """
    # PickChannelsMixin
    from .meas_info import Info
    if not isinstance(info, Info):
        raise TypeError('info must be an instance of Info, not %s'
                        % type(info))
    info._check_consistency()
    nchan = info['nchan']
    # FIX: use the builtin `bool` — the `np.bool` alias is deprecated
    # (NumPy 1.20) and removed (NumPy 1.24); behavior is identical.
    pick = np.zeros(nchan, dtype=bool)
    if exclude is None:
        raise ValueError('exclude must be a list of strings or "bads"')
    elif exclude == 'bads':
        exclude = info.get('bads', [])
    elif not isinstance(exclude, (list, tuple)):
        raise ValueError('exclude must either be "bads" or a list of strings.'
                         ' If only one channel is to be excluded, use '
                         '[ch_name] instead of passing ch_name.')
    _check_meg_type(ref_meg, allow_auto=True)
    _check_meg_type(meg)
    if isinstance(ref_meg, string_types) and ref_meg == 'auto':
        # Pick reference MEG channels only when compensation data exist.
        ref_meg = ('comps' in info and info['comps'] is not None and
                   len(info['comps']) > 0)
    for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci,
                  ias, syst, seeg, dipole, gof, bio, ecog):
        if not isinstance(param, bool):
            w = ('Parameters for all channel types (with the exception '
                 'of "meg", "ref_meg" and "fnirs") must be of type bool, '
                 'not {0}.')
            raise ValueError(w.format(type(param)))
    for k in range(nchan):
        kind = info['chs'][k]['kind']
        # XXX eventually we should de-duplicate this with channel_type!
        if kind == FIFF.FIFFV_MEG_CH and meg:
            pick[k] = _triage_meg_pick(info['chs'][k], meg)
        elif kind == FIFF.FIFFV_EEG_CH and eeg:
            pick[k] = True
        elif kind == FIFF.FIFFV_STIM_CH and stim:
            pick[k] = True
        elif kind == FIFF.FIFFV_EOG_CH and eog:
            pick[k] = True
        elif kind == FIFF.FIFFV_ECG_CH and ecg:
            pick[k] = True
        elif kind == FIFF.FIFFV_EMG_CH and emg:
            pick[k] = True
        elif kind == FIFF.FIFFV_MISC_CH and misc:
            pick[k] = True
        elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:
            pick[k] = _triage_meg_pick(info['chs'][k], ref_meg)
        elif kind == FIFF.FIFFV_RESP_CH and resp:
            pick[k] = True
        elif kind == FIFF.FIFFV_SYST_CH and syst:
            pick[k] = True
        elif kind == FIFF.FIFFV_SEEG_CH and seeg:
            pick[k] = True
        elif kind == FIFF.FIFFV_IAS_CH and ias:
            pick[k] = True
        elif kind == FIFF.FIFFV_EXCI_CH and exci:
            pick[k] = True
        elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
                      FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
                      FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
                      FIFF.FIFFV_HPI_MOV] and chpi:
            pick[k] = True
        elif kind == FIFF.FIFFV_DIPOLE_WAVE and dipole:
            pick[k] = True
        elif kind == FIFF.FIFFV_GOODNESS_FIT and gof:
            pick[k] = True
        elif kind == FIFF.FIFFV_BIO_CH and bio:
            pick[k] = True
        elif kind == FIFF.FIFFV_ECOG_CH and ecog:
            pick[k] = True
        elif kind == FIFF.FIFFV_FNIRS_CH:
            pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs)
    # restrict channels to selection if provided
    if selection is not None:
        # the selection only restricts these types of channels
        sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
                    FIFF.FIFFV_EEG_CH]
        for k in np.where(pick)[0]:
            if (info['chs'][k]['kind'] in sel_kind and
                    info['ch_names'][k] not in selection):
                pick[k] = False
    myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
    myinclude += include
    if len(myinclude) == 0:
        sel = np.array([], int)
    else:
        sel = pick_channels(info['ch_names'], myinclude, exclude)
    return sel
def pick_info(info, sel=(), copy=True):
    """Restrict an Info object to the channel indices in ``sel``.

    ``sel=None`` returns the (optionally copied) info unchanged; an
    empty selection raises ValueError.  Bads and compensation
    ('comps') matrices are reduced to the surviving channels.
    """
    info._check_consistency()
    info = info.copy() if copy else info
    if sel is None:
        return info
    elif len(sel) == 0:
        raise ValueError('No channels match the selection.')
    info['chs'] = [info['chs'][k] for k in sel]
    info._update_redundant()
    # Drop bads that no longer exist after the pick.
    info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
    # Trim each compensation matrix to the rows whose channels survive.
    comps = deepcopy(info['comps'])
    for c in comps:
        row_idx = [k for k, n in enumerate(c['data']['row_names'])
                   if n in info['ch_names']]
        row_names = [c['data']['row_names'][i] for i in row_idx]
        rowcals = c['rowcals'][row_idx]
        c['rowcals'] = rowcals
        c['data']['nrow'] = len(row_names)
        c['data']['row_names'] = row_names
        c['data']['data'] = c['data']['data'][row_idx]
    info['comps'] = comps
    info._check_consistency()
    return info
def _has_kit_refs(info, picks):
for p in picks:
if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG:
return True
return False
def pick_channels_evoked(orig, include=[], exclude='bads'):
    """Deep-copy ``orig`` keeping only the channels selected by
    ``include``/``exclude`` ('bads' resolves to orig.info['bads'])."""
    if len(include) == 0 and len(exclude) == 0:
        return orig  # nothing to restrict
    exclude = _check_excludes_includes(
        exclude, info=orig.info, allow_bads=True)
    sel = pick_channels(
        orig.info['ch_names'], include=include, exclude=exclude)
    if len(sel) == 0:
        raise ValueError('Warning : No channels match the selection.')
    picked = deepcopy(orig)
    # Trim the measurement info first, then the data rows, to the selection.
    picked.info = pick_info(picked.info, sel)
    picked.data = picked.data[sel, :]
    return picked
@verbose
def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
    """Return a deep copy of forward solution ``orig`` restricted to the
    channels selected via ``include``/``exclude`` ('bads' allowed).

    Raises ValueError when the solution and its info disagree on the
    selected channel names or when nothing remains after picking.
    """
    orig['info']._check_consistency()
    if len(include) == 0 and len(exclude) == 0:
        return orig
    exclude = _check_excludes_includes(exclude,
                                       info=orig['info'], allow_bads=True)
    # Allow for possibility of channel ordering in forward solution being
    # different from that of the M/EEG file it is based on.
    sel_sol = pick_channels(orig['sol']['row_names'], include=include,
                            exclude=exclude)
    sel_info = pick_channels(orig['info']['ch_names'], include=include,
                             exclude=exclude)
    fwd = deepcopy(orig)
    # Check that forward solution and original data file agree on #channels
    if len(sel_sol) != len(sel_info):
        raise ValueError('Forward solution and functional data appear to '
                         'have different channel names, please check.')
    # Do we have something?
    nuse = len(sel_sol)
    if nuse == 0:
        raise ValueError('Nothing remains after picking')
    logger.info(' %d out of %d channels remain after picking'
                % (nuse, fwd['nchan']))
    # Pick the correct rows of the forward operator using sel_sol
    fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
    fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
    fwd['sol']['nrow'] = nuse
    ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
    fwd['nchan'] = nuse
    fwd['sol']['row_names'] = ch_names
    # Pick the appropriate channel names from the info-dict using sel_info
    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
    fwd['info']._update_redundant()
    fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
    if fwd['sol_grad'] is not None:
        # Keep the gradient solution (when present) in sync with 'sol'.
        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
        fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
        fwd['sol_grad']['nrow'] = nuse
        fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
                                        for k in sel_sol]
    return fwd
def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,
                       ecog=False, include=[], exclude=[]):
    """Restrict a forward solution to the selected channel types."""
    info = orig['info']
    sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog,
                     include=include, exclude=exclude)
    if len(sel) == 0:
        raise ValueError('No valid channels found')
    ch_names = info['ch_names']
    picked_names = [ch_names[idx] for idx in sel]
    return pick_channels_forward(orig, picked_names)
def channel_indices_by_type(info):
    """Return a dict mapping channel type -> list of channel indices.

    The 'meg' and 'fnirs' keys are replaced by their concrete subtypes
    ('mag'/'grad' and 'hbo'/'hbr').
    """
    idx = dict((key, list()) for key in _PICK_TYPES_KEYS if
               key not in ('meg', 'fnirs'))
    idx.update(mag=list(), grad=list(), hbo=list(), hbr=list())
    for k in range(len(info['chs'])):
        # Classify each channel once instead of once per candidate type
        # (the original inner loop re-ran channel_type for every key).
        this_type = channel_type(info, k)
        if this_type in idx:
            idx[this_type].append(k)
    return idx
def pick_channels_cov(orig, include=[], exclude='bads'):
    """Return a Covariance restricted to the selected channel names."""
    from ..cov import Covariance
    exclude = orig['bads'] if exclude == 'bads' else exclude
    sel = pick_channels(orig['names'], include=include, exclude=exclude)
    # Diagonal covariances store a vector, full ones a square matrix.
    data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel]
    names = [orig['names'][k] for k in sel]
    # NOTE(review): bads are filtered against *all* original names rather
    # than the picked ones, so an excluded bad channel stays listed —
    # confirm this is intentional.
    bads = [name for name in orig['bads'] if name in orig['names']]
    res = Covariance(
        data=data, names=names, bads=bads, projs=deepcopy(orig['projs']),
        nfree=orig['nfree'], eig=None, eigvec=None,
        method=orig.get('method', None), loglik=orig.get('loglik', None))
    return res
def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'):
    """Return ``[(channel_type, picks), ...]`` for the types present.

    With ``meg_combined=True`` and both mag and grad present, a single
    'meg' entry replaces the separate mag/grad entries.
    """
    from ..channels.channels import _contains_ch_type
    has_mag, has_grad, has_eeg = [_contains_ch_type(info, k)
                                  for k in ('mag', 'grad', 'eeg')]

    def _pick(meg, eeg):
        # All entries share the same stim/ref_meg/exclude settings.
        return pick_types(info, meg=meg, eeg=eeg, stim=False,
                          ref_meg=ref_meg, exclude=exclude)

    picks_list = []
    if has_mag and (meg_combined is not True or not has_grad):
        picks_list.append(('mag', _pick('mag', False)))
    if has_grad and (meg_combined is not True or not has_mag):
        picks_list.append(('grad', _pick('grad', False)))
    if has_mag and has_grad and meg_combined is True:
        picks_list.append(('meg', _pick(True, False)))
    if has_eeg:
        picks_list.append(('eeg', _pick(False, True)))
    return picks_list
def _check_excludes_includes(chs, info=None, allow_bads=False):
    """Validate an include/exclude argument, resolving 'bads' when allowed.

    Returns ``chs`` unchanged for list/tuple/ndarray input, or
    ``info['bads']`` when ``chs == 'bads'`` and ``allow_bads`` is True.
    """
    from .meas_info import Info
    if isinstance(chs, (list, tuple, np.ndarray)):
        return chs
    # Non-sequence input is only ever legal as the literal string 'bads'.
    if allow_bads is not True:
        raise ValueError(
            'include/exclude must be list, tuple, ndarray, or "bads". ' +
            'You provided type {0}'.format(type(chs)))
    if not isinstance(info, Info):
        raise ValueError('Supply an info object if allow_bads is true')
    if chs != 'bads':
        raise ValueError('If chs is a string, it must be "bads"')
    return info['bads']
# Channel type -> picked-by-default flag used by _pick_data_channels()
# (True means the type is treated as a "data" channel).
_PICK_TYPES_DATA_DICT = dict(
    meg=True, eeg=True, stim=False, eog=False, ecg=False, emg=False,
    misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False,
    seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True)
# Every channel-type keyword accepted by pick_types() (data types + ref_meg).
_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT.keys()) + ['ref_meg'])
# Data channel types with MEG/fNIRS split into concrete subtypes.
_DATA_CH_TYPES_SPLIT = ['mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr']
# Valid data types, ordered for consistency, used in viz/evoked.
_VALID_CHANNEL_TYPES = ['eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg',
                        'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr',
                        'misc']
def _pick_data_channels(info, exclude='bads', with_ref_meg=True):
    """Pick only data channels (the types flagged in _PICK_TYPES_DATA_DICT)."""
    return pick_types(info, ref_meg=with_ref_meg, include=[], exclude=exclude,
                      selection=None, **_PICK_TYPES_DATA_DICT)
def _pick_aux_channels(info, exclude='bads'):
    """Pick only auxiliary channels (EOG, ECG, EMG and BIO)."""
    return pick_types(info, meg=False, eog=True, ecg=True, emg=True, bio=True,
                      ref_meg=False, exclude=exclude)
def _pick_data_or_ica(info):
    """Pick only data channels, or misc channels if ICA components exist."""
    ch_names = [c['ch_name'] for c in info['chs']]
    if 'ICA ' in ','.join(ch_names):
        picks = pick_types(info, exclude=[], misc=True)
    else:
        picks = _pick_data_channels(info, exclude=[], with_ref_meg=True)
    return picks
| true | true |
f732f4e7e21e8a4fab8a3b90ccfac4661d2c653c | 1,118 | py | Python | reana_job_controller/spec.py | CodyKank/reana-job-controller | 66da26389b69b8c711ace53fedaa2a70b9c34e5f | [
"MIT"
] | null | null | null | reana_job_controller/spec.py | CodyKank/reana-job-controller | 66da26389b69b8c711ace53fedaa2a70b9c34e5f | [
"MIT"
] | null | null | null | reana_job_controller/spec.py | CodyKank/reana-job-controller | 66da26389b69b8c711ace53fedaa2a70b9c34e5f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OpenAPI generator."""
from apispec import APISpec
from flask import current_app
from reana_job_controller.schemas import Job, JobRequest
def build_openapi_spec():
    """Create OpenAPI definition."""
    spec = APISpec(
        title='reana-job-controller',
        version='0.4.0',
        info=dict(description='REANA Job Controller API'),
        plugins=['apispec.ext.flask', 'apispec.ext.marshmallow'],
    )
    # Register the marshmallow models with the specification.
    for definition_name, schema in (('Job', Job), ('JobRequest', JobRequest)):
        spec.definition(definition_name, schema=schema)
    # Collect OpenAPI docstrings from every Flask endpoint, skipping the
    # static handler and the spec endpoint itself.
    skipped = ('static', 'get_openapi_spec')
    for endpoint, view in current_app.view_functions.items():
        if endpoint not in skipped:
            spec.add_path(view=view)
    return spec.to_dict()
| 26.619048 | 72 | 0.65653 |
from apispec import APISpec
from flask import current_app
from reana_job_controller.schemas import Job, JobRequest
def build_openapi_spec():
    """Create the OpenAPI definition for reana-job-controller."""
    spec = APISpec(
        title='reana-job-controller',
        version='0.4.0',
        info=dict(
            description='REANA Job Controller API'
        ),
        plugins=[
            'apispec.ext.flask',
            'apispec.ext.marshmallow',
        ]
    )
    # Register the marshmallow models with the specification.
    spec.definition('Job', schema=Job)
    spec.definition('JobRequest', schema=JobRequest)
    # Collect OpenAPI docstrings from every Flask endpoint except the
    # static handler and the spec endpoint itself.
    for key in current_app.view_functions:
        if key != 'static' and key != 'get_openapi_spec':
            spec.add_path(view=current_app.view_functions[key])
    return spec.to_dict()
| true | true |
f732f5eed6da1e0445d9d1307cec5a6e5f0ed759 | 1,887 | py | Python | books_app/core/migrations/0002_auto_20210704_1304.py | JvitorS23/my-book-list | 3d41404d1eead6e10628678a67f6c47615f66caf | [
"MIT"
] | 3 | 2021-07-14T00:33:59.000Z | 2022-03-08T11:21:49.000Z | books_app/core/migrations/0002_auto_20210704_1304.py | JvitorS23/my-book-list | 3d41404d1eead6e10628678a67f6c47615f66caf | [
"MIT"
] | 8 | 2021-07-01T12:37:08.000Z | 2021-07-04T20:09:35.000Z | books_app/core/migrations/0002_auto_20210704_1304.py | JvitorS23/my-book-list | 3d41404d1eead6e10628678a67f6c47615f66caf | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-07-04 13:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``User.is_staff`` and create the ``BookGender`` and ``Book`` models."""
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        # New boolean flag on the custom user model, defaulting to False.
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False),
        ),
        # Per-user category for books; names are unique across all users.
        # NOTE(review): 'gender' here appears to mean the book's *genre* —
        # confirm the naming before reusing it elsewhere.
        migrations.CreateModel(
            name='BookGender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Book entries; each (user, title) pair is unique and the category
        # link survives category deletion via SET_NULL.
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(db_index=True, max_length=255)),
                ('author', models.CharField(max_length=255)),
                ('num_pages', models.IntegerField()),
                ('status', models.CharField(choices=[('READING', 'Reading'), ('COMPLETED', 'Completed'), ('DROPPED', 'Dropped'), ('PLAN_TO_READ', 'Plan to read')], default='READING', max_length=15)),
                ('score', models.IntegerField(blank=True, null=True)),
                ('gender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.bookgender')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user', 'title')},
            },
        ),
    ]
| 41.933333 | 199 | 0.583996 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``User.is_staff`` and create the ``BookGender`` and ``Book`` models."""
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        # New boolean flag on the custom user model, defaulting to False.
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False),
        ),
        # NOTE(review): 'gender' appears to mean the book's *genre* — confirm.
        migrations.CreateModel(
            name='BookGender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(db_index=True, max_length=255)),
                ('author', models.CharField(max_length=255)),
                ('num_pages', models.IntegerField()),
                ('status', models.CharField(choices=[('READING', 'Reading'), ('COMPLETED', 'Completed'), ('DROPPED', 'Dropped'), ('PLAN_TO_READ', 'Plan to read')], default='READING', max_length=15)),
                ('score', models.IntegerField(blank=True, null=True)),
                ('gender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.bookgender')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user', 'title')},
            },
        ),
    ]
| true | true |
f732f62e663a2d692aa7e047db88d8bf741bbb37 | 4,878 | py | Python | networkx-d3-v2/lib/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | networkx-d3-v2/lib/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | networkx-d3-v2/lib/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | """TLS Lite + asyncore."""
import asyncore
from gdata.tlslite.TLSConnection import TLSConnection
from AsyncStateMachine import AsyncStateMachine
class TLSAsyncDispatcherMixIn(AsyncStateMachine):
    """This class can be "mixed in" with an
    L{asyncore.dispatcher} to add TLS support.

    This class essentially sits between the dispatcher and the select
    loop, intercepting events and only calling the dispatcher when
    applicable.

    In the case of handle_read(), a read operation will be activated,
    and when it completes, the bytes will be placed in a buffer where
    the dispatcher can retrieve them by calling recv(), and the
    dispatcher's handle_read() will be called.

    In the case of handle_write(), the dispatcher's handle_write() will
    be called, and when it calls send(), a write operation will be
    activated.

    To use this class, you must combine it with an asyncore.dispatcher,
    and pass in a handshake operation with setServerHandshakeOp().

    Below is an example of using this class with medusa.  This class is
    mixed in with http_channel to create http_tls_channel.  Note:

    1. the mix-in is listed first in the inheritance list

    2. the input buffer size must be at least 16K, otherwise the
    dispatcher might not read all the bytes from the TLS layer,
    leaving some bytes in limbo.

    3. IE seems to have a problem receiving a whole HTTP response in a
    single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't
    be displayed on IE.

    Add the following text into 'start_medusa.py', in the 'HTTP Server'
    section::

        from tlslite.api import *
        s = open("./serverX509Cert.pem").read()
        x509 = X509()
        x509.parse(s)
        certChain = X509CertChain([x509])
        s = open("./serverX509Key.pem").read()
        privateKey = parsePEMKey(s, private=True)

        class http_tls_channel(TLSAsyncDispatcherMixIn,
                               http_server.http_channel):
            ac_in_buffer_size = 16384

            def __init__ (self, server, conn, addr):
                http_server.http_channel.__init__(self, server, conn, addr)
                TLSAsyncDispatcherMixIn.__init__(self, conn)
                self.tlsConnection.ignoreAbruptClose = True
                self.setServerHandshakeOp(certChain=certChain,
                                          privateKey=privateKey)

        hs.channel_class = http_tls_channel

    If the TLS layer raises an exception, the exception will be caught
    in asyncore.dispatcher, which will call close() on this class.  The
    TLS layer always closes the TLS connection before raising an
    exception, so the close operation will complete right away, causing
    asyncore.dispatcher.close() to be called, which closes the socket
    and removes this instance from the asyncore loop.
    """

    def __init__(self, sock=None):
        """Wrap ``sock`` in a TLSConnection and locate the sibling class."""
        AsyncStateMachine.__init__(self)

        if sock:
            self.tlsConnection = TLSConnection(sock)

        # Calculate the sibling I'm being mixed in with.
        # This is necessary since we override functions
        # like readable(), handle_read(), etc., but we
        # also want to call the sibling's versions.
        for cl in self.__class__.__bases__:
            if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine:
                self.siblingClass = cl
                break
        else:
            raise AssertionError()

    def readable(self):
        # Let the TLS state machine decide first; fall back to the sibling.
        result = self.wantsReadEvent()
        if result is not None:  # FIX: identity test per PEP 8 (was '!= None')
            return result
        return self.siblingClass.readable(self)

    def writable(self):
        result = self.wantsWriteEvent()
        if result is not None:  # FIX: identity test per PEP 8 (was '!= None')
            return result
        return self.siblingClass.writable(self)

    def handle_read(self):
        self.inReadEvent()

    def handle_write(self):
        self.inWriteEvent()

    def outConnectEvent(self):
        self.siblingClass.handle_connect(self)

    def outCloseEvent(self):
        asyncore.dispatcher.close(self)

    def outReadEvent(self, readBuffer):
        # Stash the decrypted bytes where recv() will find them, then let
        # the sibling's handle_read() consume them.
        self.readBuffer = readBuffer
        self.siblingClass.handle_read(self)

    def outWriteEvent(self):
        self.siblingClass.handle_write(self)

    def recv(self, bufferSize=16384):
        # Callers must request at least 16K so the whole buffered TLS
        # payload is drained in one call (see class docstring, note 2).
        if bufferSize < 16384 or self.readBuffer is None:  # FIX: 'is None'
            raise AssertionError()
        returnValue = self.readBuffer
        self.readBuffer = None
        return returnValue

    def send(self, writeBuffer):
        self.setWriteOp(writeBuffer)
        return len(writeBuffer)

    def close(self):
        if hasattr(self, "tlsConnection"):
            self.setCloseOp()
        else:
            asyncore.dispatcher.close(self)
import asyncore
from gdata.tlslite.TLSConnection import TLSConnection
from AsyncStateMachine import AsyncStateMachine
class TLSAsyncDispatcherMixIn(AsyncStateMachine):
def __init__(self, sock=None):
AsyncStateMachine.__init__(self)
if sock:
self.tlsConnection = TLSConnection(sock)
#This is necessary since we override functions
#like readable(), handle_read(), etc., but we
#also want to call the sibling's versions.
for cl in self.__class__.__bases__:
if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine:
self.siblingClass = cl
break
else:
raise AssertionError()
def readable(self):
result = self.wantsReadEvent()
if result != None:
return result
return self.siblingClass.readable(self)
def writable(self):
result = self.wantsWriteEvent()
if result != None:
return result
return self.siblingClass.writable(self)
def handle_read(self):
self.inReadEvent()
def handle_write(self):
self.inWriteEvent()
def outConnectEvent(self):
self.siblingClass.handle_connect(self)
def outCloseEvent(self):
asyncore.dispatcher.close(self)
def outReadEvent(self, readBuffer):
self.readBuffer = readBuffer
self.siblingClass.handle_read(self)
def outWriteEvent(self):
self.siblingClass.handle_write(self)
def recv(self, bufferSize=16384):
if bufferSize < 16384 or self.readBuffer == None:
raise AssertionError()
returnValue = self.readBuffer
self.readBuffer = None
return returnValue
def send(self, writeBuffer):
self.setWriteOp(writeBuffer)
return len(writeBuffer)
def close(self):
if hasattr(self, "tlsConnection"):
self.setCloseOp()
else:
asyncore.dispatcher.close(self)
| true | true |
f732f736784b388962d513111f1e7834e84036cd | 14,608 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/tests/unit/modules/network/eos/test_eos_acls.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/tests/unit/modules/network/eos/test_eos_acls.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/tests/unit/modules/network/eos/test_eos_acls.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.arista.eos.tests.unit.compat.mock import patch
from ansible_collections.arista.eos.plugins.modules import eos_acls
from ansible_collections.arista.eos.plugins.module_utils.network.eos.config.acls.acls import (
add_commands,
)
from ansible_collections.arista.eos.tests.unit.modules.utils import (
set_module_args,
)
from .eos_module import TestEosModule, load_fixture
import itertools
class TestEosAclsModule(TestEosModule):
    """Unit tests for the eos_acls resource module.

    The repeated ACL argspec payloads are built through small helper
    methods instead of being copy-pasted into every test.
    """

    module = eos_acls

    def setUp(self):
        """Patch config/connection plumbing and the facts-gathering call."""
        super(TestEosAclsModule, self).setUp()

        self.mock_get_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
        )
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
        )
        self.load_config = self.mock_load_config.start()

        self.mock_get_resource_connection_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection"
        )
        self.get_resource_connection_config = (
            self.mock_get_resource_connection_config.start()
        )

        self.mock_get_resource_connection_facts = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection"
        )
        self.get_resource_connection_facts = (
            self.mock_get_resource_connection_facts.start()
        )

        self.mock_edit_config = patch(
            "ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config"
        )
        self.edit_config = self.mock_edit_config.start()

        self.mock_execute_show_command = patch(
            "ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.acls.acls.AclsFacts.get_device_data"
        )
        self.execute_show_command = self.mock_execute_show_command.start()

    def tearDown(self):
        """Stop every patcher started in setUp."""
        super(TestEosAclsModule, self).tearDown()
        self.mock_get_resource_connection_config.stop()
        self.mock_get_resource_connection_facts.stop()
        self.mock_edit_config.stop()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_execute_show_command.stop()

    def load_fixtures(self, commands=None, transport="cli", filename=None):
        """Serve device output from a fixture file (default running config)."""
        if filename is None:
            filename = "eos_acls_config.cfg"

        def load_from_file(*args, **kwargs):
            return load_fixture(filename)

        self.execute_show_command.side_effect = load_from_file

    # -- helpers for the repeated ACL argspec payloads --------------------

    @staticmethod
    def _acl_config(afi, name, aces=None, standard=None):
        """Build the module ``config`` payload for a single named ACL."""
        acl = dict(name=name)
        if standard is not None:
            acl["standard"] = standard
        if aces is not None:
            acl["aces"] = aces
        return [dict(afi=afi, acls=[acl])]

    @staticmethod
    def _deny_tcp_20_ace():
        """Sequence 35: deny tcp 20.0.0.0/8 any log (matches the fixture)."""
        return dict(
            sequence="35",
            grant="deny",
            protocol="tcp",
            source=dict(subnet_address="20.0.0.0/8"),
            destination=dict(any="true"),
            log="true",
        )

    @staticmethod
    def _permit_any_any_ace(**overrides):
        """Permit any -> any ACE; extra keys (sequence/protocol) via kwargs."""
        ace = dict(
            grant="permit",
            source=dict(any="true"),
            destination=dict(any="true"),
        )
        ace.update(overrides)
        return ace

    @staticmethod
    def _permit_ospf_ace():
        """Sequence 10: permit ospf 30.2.0.0/8 any log."""
        return dict(
            sequence="10",
            grant="permit",
            protocol="ospf",
            source=dict(subnet_address="30.2.0.0/8"),
            destination=dict(any="true"),
            log="true",
        )

    def _fixture_test1_config(self, second_ace):
        """ipv4 ACL 'test1' as present in the fixture running config."""
        return self._acl_config(
            "ipv4", "test1", aces=[self._deny_tcp_20_ace(), second_ace]
        )

    # -- tests ------------------------------------------------------------

    def test_eos_acls_merged(self):
        set_module_args(
            dict(
                config=self._acl_config(
                    "ipv6",
                    "test2",
                    standard="true",
                    aces=[
                        dict(
                            sequence="10",
                            grant="permit",
                            protocol="tcp",
                            protocol_options=dict(
                                tcp=dict(flags=dict(established="yes"))
                            ),
                            source=dict(subnet_address="30.2.0.0/8"),
                            destination=dict(any="true"),
                            log="true",
                        )
                    ],
                ),
                state="merged",
            )
        )
        commands = [
            "ipv6 access-list standard test2",
            "10 permit tcp 30.2.0.0/8 any established log",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_eos_acls_merged_idempotent(self):
        # Merging the fixture's own config must be a no-op.
        set_module_args(
            dict(
                config=self._fixture_test1_config(
                    self._permit_any_any_ace(protocol=6)
                ),
                state="merged",
            )
        )
        self.execute_module(changed=False, commands=[])

    def test_eos_acls_replaced(self):
        set_module_args(
            dict(
                config=self._acl_config(
                    "ipv4", "test1", aces=[self._permit_ospf_ace()]
                ),
                state="replaced",
            )
        )
        commands = [
            "ip access-list test1",
            "no 35",
            "no 45",
            "10 permit ospf 30.2.0.0/8 any log",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_eos_acls_replaced_idempotent(self):
        set_module_args(
            dict(
                config=self._fixture_test1_config(
                    self._permit_any_any_ace(sequence="45", protocol="tcp")
                ),
                state="replaced",
            )
        )
        self.execute_module(changed=False, commands=[])

    def test_eos_acls_overridden(self):
        set_module_args(
            dict(
                config=self._acl_config(
                    "ipv4", "test1", aces=[self._permit_ospf_ace()]
                ),
                state="overridden",
            )
        )
        commands = [
            "ip access-list test1",
            "no 35",
            "no 45",
            "10 permit ospf 30.2.0.0/8 any log",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_eos_acls_overridden_idempotent(self):
        set_module_args(
            dict(
                config=self._fixture_test1_config(
                    self._permit_any_any_ace(sequence="45", protocol="tcp")
                ),
                state="overridden",
            )
        )
        self.execute_module(changed=False, commands=[])

    def test_eos_acls_deletedacls(self):
        set_module_args(
            dict(config=self._acl_config("ipv4", "test1"), state="deleted")
        )
        self.execute_module(
            changed=True, commands=["no ip access-list test1"]
        )

    def test_eos_acls_deletedafis(self):
        set_module_args(dict(config=[dict(afi="ipv4")], state="deleted"))
        self.execute_module(
            changed=True, commands=["no ip access-list test1"]
        )

    def test_eos_acls_gathered(self):
        set_module_args(dict(config=[], state="gathered"))
        result = self.execute_module(
            changed=False, filename="eos_acls_config.cfg"
        )
        commands = list(
            itertools.chain.from_iterable(
                add_commands(gathered_cmds)
                for gathered_cmds in result["gathered"]
            )
        )
        config_commands = [
            "ip access-list test1",
            "35 deny tcp 20.0.0.0/8 any log",
            "45 permit tcp any any",
        ]
        self.assertEqual(
            sorted(config_commands), sorted(commands), result["gathered"]
        )

    def test_eos_acls_rendered(self):
        set_module_args(
            dict(
                config=self._acl_config(
                    "ipv4",
                    "test1",
                    aces=[self._permit_any_any_ace(sequence="45", protocol=6)],
                ),
                state="rendered",
            )
        )
        commands = ["ip access-list test1", "45 permit tcp any any"]
        result = self.execute_module(changed=False)
        self.assertEqual(
            sorted(result["rendered"]), sorted(commands), result["rendered"]
        )

    def test_eos_acls_parsed(self):
        set_module_args(
            dict(
                running_config="ipv6 access-list test2\n 10 permit icmpv6 host 10.2.33.1 any ttl eq 25",
                state="parsed",
            )
        )
        commands = [
            "ipv6 access-list test2",
            "10 permit icmpv6 host 10.2.33.1 any ttl eq 25",
        ]
        result = self.execute_module(changed=False)
        parsed_commands = list(
            itertools.chain.from_iterable(
                add_commands(cmds) for cmds in result["parsed"]
            )
        )
        self.assertEqual(
            sorted(parsed_commands), sorted(commands), result["parsed"]
        )
| 37.649485 | 123 | 0.374932 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.arista.eos.tests.unit.compat.mock import patch
from ansible_collections.arista.eos.plugins.modules import eos_acls
from ansible_collections.arista.eos.plugins.module_utils.network.eos.config.acls.acls import (
add_commands,
)
from ansible_collections.arista.eos.tests.unit.modules.utils import (
set_module_args,
)
from .eos_module import TestEosModule, load_fixture
import itertools
class TestEosAclsModule(TestEosModule):
module = eos_acls
def setUp(self):
super(TestEosAclsModule, self).setUp()
self.mock_get_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection"
)
self.get_resource_connection_config = (
self.mock_get_resource_connection_config.start()
)
self.mock_get_resource_connection_facts = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection"
)
self.get_resource_connection_facts = (
self.mock_get_resource_connection_facts.start()
)
self.mock_edit_config = patch(
"ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config"
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
"ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.acls.acls.AclsFacts.get_device_data"
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestEosAclsModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
def load_fixtures(self, commands=None, transport="cli", filename=None):
if filename is None:
filename = "eos_acls_config.cfg"
def load_from_file(*args, **kwargs):
output = load_fixture(filename)
return output
self.execute_show_command.side_effect = load_from_file
def test_eos_acls_merged(self):
set_module_args(
dict(
config=[
dict(
afi="ipv6",
acls=[
dict(
name="test2",
standard="true",
aces=[
dict(
sequence="10",
grant="permit",
protocol="tcp",
protocol_options=dict(
tcp=dict(
flags=dict(established="yes")
)
),
source=dict(
subnet_address="30.2.0.0/8"
),
destination=dict(any="true"),
log="true",
)
],
)
],
)
],
state="merged",
)
)
commands = [
"ipv6 access-list standard test2",
"10 permit tcp 30.2.0.0/8 any established log",
]
self.execute_module(changed=True, commands=commands)
def test_eos_acls_merged_idempotent(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
sequence="35",
grant="deny",
protocol="tcp",
source=dict(
subnet_address="20.0.0.0/8"
),
destination=dict(any="true"),
log="true",
),
dict(
grant="permit",
source=dict(any="true"),
destination=dict(any="true"),
protocol=6,
),
],
)
],
)
],
state="merged",
)
)
self.execute_module(changed=False, commands=[])
def test_eos_acls_replaced(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
sequence="10",
grant="permit",
protocol="ospf",
source=dict(
subnet_address="30.2.0.0/8"
),
destination=dict(any="true"),
log="true",
)
],
)
],
)
],
state="replaced",
)
)
commands = [
"ip access-list test1",
"no 35",
"no 45",
"10 permit ospf 30.2.0.0/8 any log",
]
self.execute_module(changed=True, commands=commands)
def test_eos_acls_replaced_idempotent(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
sequence="35",
grant="deny",
protocol="tcp",
source=dict(
subnet_address="20.0.0.0/8"
),
destination=dict(any="true"),
log="true",
),
dict(
grant="permit",
source=dict(any="true"),
destination=dict(any="true"),
sequence="45",
protocol="tcp",
),
],
)
],
)
],
state="replaced",
)
)
self.execute_module(changed=False, commands=[])
def test_eos_acls_overridden(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
sequence="10",
grant="permit",
protocol="ospf",
source=dict(
subnet_address="30.2.0.0/8"
),
destination=dict(any="true"),
log="true",
)
],
)
],
)
],
state="overridden",
)
)
commands = [
"ip access-list test1",
"no 35",
"no 45",
"10 permit ospf 30.2.0.0/8 any log",
]
self.execute_module(changed=True, commands=commands)
def test_eos_acls_overridden_idempotent(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
sequence="35",
grant="deny",
protocol="tcp",
source=dict(
subnet_address="20.0.0.0/8"
),
destination=dict(any="true"),
log="true",
),
dict(
grant="permit",
source=dict(any="true"),
destination=dict(any="true"),
sequence="45",
protocol="tcp",
),
],
)
],
)
],
state="overridden",
)
)
self.execute_module(changed=False, commands=[])
def test_eos_acls_deletedacls(self):
set_module_args(
dict(
config=[dict(afi="ipv4", acls=[dict(name="test1")])],
state="deleted",
)
)
commands = ["no ip access-list test1"]
self.execute_module(changed=True, commands=commands)
def test_eos_acls_deletedafis(self):
set_module_args(dict(config=[dict(afi="ipv4")], state="deleted"))
commands = ["no ip access-list test1"]
self.execute_module(changed=True, commands=commands)
def test_eos_acls_gathered(self):
set_module_args(dict(config=[], state="gathered"))
result = self.execute_module(
changed=False, filename="eos_acls_config.cfg"
)
commands = []
for gathered_cmds in result["gathered"]:
cfg = add_commands(gathered_cmds)
commands.append(cfg)
commands = list(itertools.chain(*commands))
config_commands = [
"ip access-list test1",
"35 deny tcp 20.0.0.0/8 any log",
"45 permit tcp any any",
]
self.assertEqual(
sorted(config_commands), sorted(commands), result["gathered"]
)
def test_eos_acls_rendered(self):
set_module_args(
dict(
config=[
dict(
afi="ipv4",
acls=[
dict(
name="test1",
aces=[
dict(
grant="permit",
sequence="45",
source=dict(any="true"),
destination=dict(any="true"),
protocol=6,
)
],
)
],
)
],
state="rendered",
)
)
commands = ["ip access-list test1", "45 permit tcp any any"]
result = self.execute_module(changed=False)
self.assertEqual(
sorted(result["rendered"]), sorted(commands), result["rendered"]
)
def test_eos_acls_parsed(self):
set_module_args(
dict(
running_config="ipv6 access-list test2\n 10 permit icmpv6 host 10.2.33.1 any ttl eq 25",
state="parsed",
)
)
commands = [
"ipv6 access-list test2",
"10 permit icmpv6 host 10.2.33.1 any ttl eq 25",
]
result = self.execute_module(changed=False)
parsed_commands = []
for cmds in result["parsed"]:
cfg = add_commands(cmds)
parsed_commands.append(cfg)
parsed_commands = list(itertools.chain(*parsed_commands))
self.assertEqual(
sorted(parsed_commands), sorted(commands), result["parsed"]
)
| true | true |
f732f7906f9eea0af911846b31d9da44293429d5 | 7,757 | py | Python | sas/sascalc/data_util/ordereddicttest.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | sas/sascalc/data_util/ordereddicttest.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | sas/sascalc/data_util/ordereddicttest.py | arm61/fitbenchmarking | c745c684e3ca4895a666eb863426746d8f06636c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from random import shuffle
import copy
import inspect
import pickle
import unittest
from ordereddict import OrderedDict
class TestOrderedDict(unittest.TestCase):
def test_init(self):
self.assertRaises(TypeError, OrderedDict, ([('a', 1), ('b', 2)], None))
# too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input
self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input
self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input
self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
c=3, e=5).items()), pairs) # mixed input
# make sure no positional args conflict with possible kwdargs
self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__'])[0],
['self'])
# Make sure that direct calls to __init__ do not clear previous contents
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.__init__([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update(self):
self.assertRaises(TypeError, OrderedDict().update, [('a', 1), ('b',
2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict()
od.update(dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # dict input
od = OrderedDict()
od.update(**dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # kwds input
od = OrderedDict()
od.update(pairs)
self.assertEqual(list(od.items()), pairs) # pairs input
od = OrderedDict()
od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
self.assertEqual(list(od.items()), pairs) # mixed input
# Make sure that direct calls to update do not clear previous contents
# add that updates items are not moved to the end
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.update([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_clear(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(len(od), len(pairs))
od.clear()
self.assertEqual(len(od), 0)
def test_delitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
del od['a']
self.assert_('a' not in od)
self.assertRaises(KeyError, od.__delitem__, 'a')
self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_setitem(self):
od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
od['c'] = 10 # existing element
od['f'] = 20 # new element
self.assertEqual(list(od.items()),
[('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
def test_iterators(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(list(od), [t[0] for t in pairs])
self.assertEqual(list(od.keys()), [t[0] for t in pairs])
self.assertEqual(list(od.values()), [t[1] for t in pairs])
self.assertEqual(list(od.items()), pairs)
self.assertEqual(list(reversed(od)),
[t[0] for t in reversed(pairs)])
def test_popitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
while pairs:
self.assertEqual(od.popitem(), pairs.pop())
self.assertRaises(KeyError, od.popitem)
self.assertEqual(len(od), 0)
def test_pop(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
shuffle(pairs)
while pairs:
k, v = pairs.pop()
self.assertEqual(od.pop(k), v)
self.assertRaises(KeyError, od.pop, 'xyz')
self.assertEqual(len(od), 0)
self.assertEqual(od.pop(k, 12345), 12345)
def test_equality(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od1 = OrderedDict(pairs)
od2 = OrderedDict(pairs)
self.assertEqual(od1, od2) # same order implies equality
pairs = pairs[2:] + pairs[:2]
od2 = OrderedDict(pairs)
self.assertNotEqual(od1, od2) # different order implies inequality
# comparison to regular dict is not order sensitive
self.assertEqual(od1, dict(od2))
self.assertEqual(dict(od2), od1)
# different length implied inequality
self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_copying(self):
# Check that ordered dicts are copyable, deepcopyable, picklable,
# and have a repr/eval round-trip
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
update_test = OrderedDict()
update_test.update(od)
for i, dup in enumerate([
od.copy(),
copy.copy(od),
copy.deepcopy(od),
pickle.loads(pickle.dumps(od, 0)),
pickle.loads(pickle.dumps(od, 1)),
pickle.loads(pickle.dumps(od, 2)),
pickle.loads(pickle.dumps(od, -1)),
eval(repr(od)),
update_test,
OrderedDict(od),
]):
self.assert_(dup is not od)
self.assertEquals(dup, od)
self.assertEquals(list(dup.items()), list(od.items()))
self.assertEquals(len(dup), len(od))
self.assertEquals(type(dup), type(od))
def test_repr(self):
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
self.assertEqual(repr(od),
"OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
self.assertEqual(eval(repr(od)), od)
self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def test_setdefault(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
pair_order = list(od.items())
self.assertEqual(od.setdefault('a', 10), 3)
# make sure order didn't change
self.assertEqual(list(od.items()), pair_order)
self.assertEqual(od.setdefault('x', 10), 10)
# make sure 'x' is added to the end
self.assertEqual(list(od.items())[-1], ('x', 10))
def test_reinsert(self):
# Given insert a, insert b, delete a, re-insert a,
# verify that a is now later than b.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
del od['a']
od['a'] = 1
self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
if __name__ == "__main__":
unittest.main()
| 41.481283 | 97 | 0.485755 |
from random import shuffle
import copy
import inspect
import pickle
import unittest
from ordereddict import OrderedDict
class TestOrderedDict(unittest.TestCase):
def test_init(self):
self.assertRaises(TypeError, OrderedDict, ([('a', 1), ('b', 2)], None))
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)
self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)
self.assertEqual(list(OrderedDict(pairs).items()), pairs)
self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
c=3, e=5).items()), pairs)
self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__'])[0],
['self'])
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.__init__([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update(self):
self.assertRaises(TypeError, OrderedDict().update, [('a', 1), ('b',
2)], None)
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict()
od.update(dict(pairs))
self.assertEqual(sorted(od.items()), pairs)
od = OrderedDict()
od.update(**dict(pairs))
self.assertEqual(sorted(od.items()), pairs)
od = OrderedDict()
od.update(pairs)
self.assertEqual(list(od.items()), pairs)
od = OrderedDict()
od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
self.assertEqual(list(od.items()), pairs)
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.update([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_clear(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(len(od), len(pairs))
od.clear()
self.assertEqual(len(od), 0)
def test_delitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
del od['a']
self.assert_('a' not in od)
self.assertRaises(KeyError, od.__delitem__, 'a')
self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_setitem(self):
od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
od['c'] = 10
od['f'] = 20
self.assertEqual(list(od.items()),
[('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
def test_iterators(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(list(od), [t[0] for t in pairs])
self.assertEqual(list(od.keys()), [t[0] for t in pairs])
self.assertEqual(list(od.values()), [t[1] for t in pairs])
self.assertEqual(list(od.items()), pairs)
self.assertEqual(list(reversed(od)),
[t[0] for t in reversed(pairs)])
def test_popitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
while pairs:
self.assertEqual(od.popitem(), pairs.pop())
self.assertRaises(KeyError, od.popitem)
self.assertEqual(len(od), 0)
def test_pop(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
shuffle(pairs)
while pairs:
k, v = pairs.pop()
self.assertEqual(od.pop(k), v)
self.assertRaises(KeyError, od.pop, 'xyz')
self.assertEqual(len(od), 0)
self.assertEqual(od.pop(k, 12345), 12345)
def test_equality(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od1 = OrderedDict(pairs)
od2 = OrderedDict(pairs)
self.assertEqual(od1, od2)
pairs = pairs[2:] + pairs[:2]
od2 = OrderedDict(pairs)
self.assertNotEqual(od1, od2)
self.assertEqual(od1, dict(od2))
self.assertEqual(dict(od2), od1)
self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_copying(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
update_test = OrderedDict()
update_test.update(od)
for i, dup in enumerate([
od.copy(),
copy.copy(od),
copy.deepcopy(od),
pickle.loads(pickle.dumps(od, 0)),
pickle.loads(pickle.dumps(od, 1)),
pickle.loads(pickle.dumps(od, 2)),
pickle.loads(pickle.dumps(od, -1)),
eval(repr(od)),
update_test,
OrderedDict(od),
]):
self.assert_(dup is not od)
self.assertEquals(dup, od)
self.assertEquals(list(dup.items()), list(od.items()))
self.assertEquals(len(dup), len(od))
self.assertEquals(type(dup), type(od))
def test_repr(self):
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
self.assertEqual(repr(od),
"OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
self.assertEqual(eval(repr(od)), od)
self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def test_setdefault(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
pair_order = list(od.items())
self.assertEqual(od.setdefault('a', 10), 3)
self.assertEqual(list(od.items()), pair_order)
self.assertEqual(od.setdefault('x', 10), 10)
# make sure 'x' is added to the end
self.assertEqual(list(od.items())[-1], ('x', 10))
def test_reinsert(self):
# Given insert a, insert b, delete a, re-insert a,
# verify that a is now later than b.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
del od['a']
od['a'] = 1
self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
if __name__ == "__main__":
unittest.main()
| true | true |
f732f8891382f4d823f29467801b37510e4bebda | 118 | py | Python | {{cookiecutter.project_slug}}/src/app/admin/model_admin.py | nvo87/django | fd07fb74ab59e868c73512cd0ca4952129b44cd8 | [
"MIT"
] | 98 | 2020-04-21T20:22:16.000Z | 2021-06-07T12:33:51.000Z | {{cookiecutter.project_slug}}/src/app/admin/model_admin.py | nvo87/django | fd07fb74ab59e868c73512cd0ca4952129b44cd8 | [
"MIT"
] | 70 | 2020-04-21T21:59:49.000Z | 2021-06-13T13:35:01.000Z | {{cookiecutter.project_slug}}/src/app/admin/model_admin.py | ginsenghillock/django | 65ab4f52897ca7efdfde347383153fca4f2d2c14 | [
"MIT"
] | 23 | 2020-04-23T06:03:13.000Z | 2021-06-09T06:59:34.000Z | from django.contrib import admin
class ModelAdmin(admin.ModelAdmin):
"""Future app-wide admin customizations"""
| 19.666667 | 46 | 0.762712 | from django.contrib import admin
class ModelAdmin(admin.ModelAdmin):
| true | true |
f732f8a071807b509d76e81fff8b30ee19833db3 | 2,278 | py | Python | src/config/fabric-ansible/job_manager/sandesh_utils.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/config/fabric-ansible/job_manager/sandesh_utils.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/fabric-ansible/job_manager/sandesh_utils.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""Contains utility functions used for Sandesh initialization and logging."""
from builtins import object
import time
from job_manager.job_exception import JobException
from job_manager.job_messages import MsgBundle
class SandeshUtils(object):
def __init__(self, logger):
"""
Create an instance of SandeshUtils.
:param logger: Sandesh logger
"""
self._logger = logger
# @wrapt_timeout_decorator.timeout(15, timeout_exception=JobException)
def wait_for_connection_establish(self):
total_wait = 0
state = self._logger._sandesh._client._connection.\
statemachine().state()
while state is None or state != "Established":
time.sleep(0.2)
total_wait += 0.2
if total_wait > 30:
self._logger.error("Giving up on opening sandesh connection:"
" %s" % state)
break
state = self._logger._sandesh._client._connection.\
statemachine().state()
def uninit_sandesh(self):
self._logger._sandesh._client._connection.set_admin_state(down=True)
self._logger._sandesh.uninit()
# @wrapt_timeout_decorator.timeout(15, timeout_exception=JobException)
def wait_for_msg_send(self):
total_wait = 0
while not self._logger._sandesh.is_send_queue_empty():
time.sleep(0.2)
total_wait += 0.2
if total_wait > 30:
state = self._logger._sandesh._client._connection.\
statemachine().state()
self._logger.error("Giving up on empty sandesh send queue:"
" %s" % state)
break
# checks and waits for the sandesh client message queue to be empty and
# then closes the sandesh connection
def close_sandesh_connection(self):
try:
self.wait_for_msg_send()
except JobException as job_exp:
msg = MsgBundle.getMessage(MsgBundle.CLOSE_SANDESH_EXCEPTION)
self._logger.error(msg)
job_exp.msg = msg
raise job_exp
finally:
self.uninit_sandesh()
| 34 | 77 | 0.614574 |
from builtins import object
import time
from job_manager.job_exception import JobException
from job_manager.job_messages import MsgBundle
class SandeshUtils(object):
def __init__(self, logger):
self._logger = logger
def wait_for_connection_establish(self):
total_wait = 0
state = self._logger._sandesh._client._connection.\
statemachine().state()
while state is None or state != "Established":
time.sleep(0.2)
total_wait += 0.2
if total_wait > 30:
self._logger.error("Giving up on opening sandesh connection:"
" %s" % state)
break
state = self._logger._sandesh._client._connection.\
statemachine().state()
def uninit_sandesh(self):
self._logger._sandesh._client._connection.set_admin_state(down=True)
self._logger._sandesh.uninit()
def wait_for_msg_send(self):
total_wait = 0
while not self._logger._sandesh.is_send_queue_empty():
time.sleep(0.2)
total_wait += 0.2
if total_wait > 30:
state = self._logger._sandesh._client._connection.\
statemachine().state()
self._logger.error("Giving up on empty sandesh send queue:"
" %s" % state)
break
def close_sandesh_connection(self):
try:
self.wait_for_msg_send()
except JobException as job_exp:
msg = MsgBundle.getMessage(MsgBundle.CLOSE_SANDESH_EXCEPTION)
self._logger.error(msg)
job_exp.msg = msg
raise job_exp
finally:
self.uninit_sandesh()
| true | true |
f732f8e51cba0ac367420fda29c64bfbd1f0079a | 1,484 | py | Python | backend/pycloud_api/api/endpoints/user.py | git-albertomarin/pycloud | 2d273096bc16d09ec2890ca3e3200ad64bde92d3 | [
"Apache-2.0"
] | 3 | 2020-02-23T12:54:47.000Z | 2021-01-09T11:32:49.000Z | backend/pycloud_api/api/endpoints/user.py | git-albertomarin/pycloud | 2d273096bc16d09ec2890ca3e3200ad64bde92d3 | [
"Apache-2.0"
] | 5 | 2021-03-09T22:04:10.000Z | 2022-02-18T14:07:29.000Z | backend/pycloud_api/api/endpoints/user.py | albmarin/Pycloud | 2d273096bc16d09ec2890ca3e3200ad64bde92d3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from fastapi import APIRouter, Body, Security
from pycloud_api.crud.tenant import get_tenant_by_id
from pycloud_api.crud.user import get_current_user, check_free_username_and_email
from pycloud_api.models.schemas.tenant import Tenant, TenantInResponse
from pycloud_api.models.schemas.user import User, UserInDB, UserInResponse, UserInUpdate
router = APIRouter()
@router.get("/users/me", response_model=UserInResponse, tags=["users"])
async def retrieve_current_user(
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
return UserInResponse(user=User(**current_user.dict()))
@router.put("/users/me", response_model=UserInResponse, tags=["users"])
async def update_current_user(
user: UserInUpdate = Body(..., embed=True),
current_user: UserInDB = Security(get_current_user, scopes=["edit:profile"]),
):
if user.username == current_user.username:
user.username = None
if user.email == current_user.email:
user.email = None
await check_free_username_and_email(user.username, user.email)
return UserInResponse(user=User(**current_user.dict()))
@router.get("/users/me/tenant", response_model=TenantInResponse, tags=["users"])
async def retrieve_current_user_tenant(
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
tenant_by_id = await get_tenant_by_id(current_user.tenant)
return TenantInResponse(tenant=Tenant(**tenant_by_id.dict()))
| 38.051282 | 88 | 0.760108 |
from fastapi import APIRouter, Body, Security
from pycloud_api.crud.tenant import get_tenant_by_id
from pycloud_api.crud.user import get_current_user, check_free_username_and_email
from pycloud_api.models.schemas.tenant import Tenant, TenantInResponse
from pycloud_api.models.schemas.user import User, UserInDB, UserInResponse, UserInUpdate
router = APIRouter()
@router.get("/users/me", response_model=UserInResponse, tags=["users"])
async def retrieve_current_user(
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
return UserInResponse(user=User(**current_user.dict()))
@router.put("/users/me", response_model=UserInResponse, tags=["users"])
async def update_current_user(
user: UserInUpdate = Body(..., embed=True),
current_user: UserInDB = Security(get_current_user, scopes=["edit:profile"]),
):
if user.username == current_user.username:
user.username = None
if user.email == current_user.email:
user.email = None
await check_free_username_and_email(user.username, user.email)
return UserInResponse(user=User(**current_user.dict()))
@router.get("/users/me/tenant", response_model=TenantInResponse, tags=["users"])
async def retrieve_current_user_tenant(
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
tenant_by_id = await get_tenant_by_id(current_user.tenant)
return TenantInResponse(tenant=Tenant(**tenant_by_id.dict()))
| true | true |
f732f9ff8549e75b8c69601d77977b4b42746a1b | 1,713 | py | Python | amaranth_boards/zturn_lite_z007s.py | lethalbit/nmigen-boards | aaf18252e457ff95257137da2a629820c0ff2bfa | [
"BSD-2-Clause"
] | 11 | 2021-12-10T12:23:29.000Z | 2022-03-13T08:40:20.000Z | amaranth_boards/zturn_lite_z007s.py | lethalbit/nmigen-boards | aaf18252e457ff95257137da2a629820c0ff2bfa | [
"BSD-2-Clause"
] | 12 | 2021-12-11T18:51:29.000Z | 2022-03-12T05:08:52.000Z | amaranth_boards/zturn_lite_z007s.py | lethalbit/nmigen-boards | aaf18252e457ff95257137da2a629820c0ff2bfa | [
"BSD-2-Clause"
] | 7 | 2021-12-12T07:20:21.000Z | 2022-03-06T06:20:55.000Z | from amaranth.build import *
from amaranth.vendor.xilinx_7series import *
__all__ = ["ZTurnLiteZ007SPlatform"]
class ZTurnLiteZ007SPlatform(Xilinx7SeriesPlatform):
device = "xc7z007s"
package = "clg400"
speed = "1"
resources = []
connectors = [
Connector("expansion", 0,
"- - "
"B19 E17 "
"A20 D18 "
"- - "
"E18 D19 "
"E19 D20 "
"G17 F16 "
"G18 F17 "
"- - "
"- - "
"J18 J20 "
"H18 H20 "
"C20 K17 "
"B20 K18 "
"- - "
"G19 K19 "
"G20 J19 "
"F19 H15 "
"F20 G15 "
"- - "
"L16 K14 "
"L17 J14 "
"L19 H16 "
"L20 H17 "
"- - "
"K16 L14 "
"J16 L15 "
"M17 M14 "
"M18 M15 "
"- - "
"N17 P15 "
"P18 P16 "
"M19 N15 "
"M20 N16 "
"- - "
"N18 - "
"P19 R16 "
"N20 R17 "
"P20 T20 "
"- U20 "
"- - "
"T16 V20 "
"U17 W20 "
"U18 T17 "
"U19 R18 "
"- - "
"W18 V17 "
"W19 V18 "
"U14 V16 "
"U15 W16 "
"- - "
"V15 Y18 "
"W15 Y19 "
"Y16 W14 "
"Y17 Y14 "
"- - "
"- - "
"- - "
"- - "
"- - "
),
]
| 22.246753 | 52 | 0.269119 | from amaranth.build import *
from amaranth.vendor.xilinx_7series import *
__all__ = ["ZTurnLiteZ007SPlatform"]
class ZTurnLiteZ007SPlatform(Xilinx7SeriesPlatform):
device = "xc7z007s"
package = "clg400"
speed = "1"
resources = []
connectors = [
Connector("expansion", 0,
"- - "
"B19 E17 "
"A20 D18 "
"- - "
"E18 D19 "
"E19 D20 "
"G17 F16 "
"G18 F17 "
"- - "
"- - "
"J18 J20 "
"H18 H20 "
"C20 K17 "
"B20 K18 "
"- - "
"G19 K19 "
"G20 J19 "
"F19 H15 "
"F20 G15 "
"- - "
"L16 K14 "
"L17 J14 "
"L19 H16 "
"L20 H17 "
"- - "
"K16 L14 "
"J16 L15 "
"M17 M14 "
"M18 M15 "
"- - "
"N17 P15 "
"P18 P16 "
"M19 N15 "
"M20 N16 "
"- - "
"N18 - "
"P19 R16 "
"N20 R17 "
"P20 T20 "
"- U20 "
"- - "
"T16 V20 "
"U17 W20 "
"U18 T17 "
"U19 R18 "
"- - "
"W18 V17 "
"W19 V18 "
"U14 V16 "
"U15 W16 "
"- - "
"V15 Y18 "
"W15 Y19 "
"Y16 W14 "
"Y17 Y14 "
"- - "
"- - "
"- - "
"- - "
"- - "
),
]
| true | true |
f732fbd3e6dc3979180d2e2ad1d28018bd128e2d | 12,791 | py | Python | backend/proxy_modules/forgery.py | veepee-oss/hermes | 24b3057a8a1faf3a92291e86dc889f291d874ad0 | [
"0BSD"
] | 26 | 2021-08-04T14:06:58.000Z | 2021-12-14T09:15:02.000Z | backend/proxy_modules/forgery.py | veepee-oss/hermes | 24b3057a8a1faf3a92291e86dc889f291d874ad0 | [
"0BSD"
] | null | null | null | backend/proxy_modules/forgery.py | veepee-oss/hermes | 24b3057a8a1faf3a92291e86dc889f291d874ad0 | [
"0BSD"
] | 4 | 2021-08-18T04:06:45.000Z | 2022-01-02T17:47:27.000Z | import copy
import json
import ssl
import flexssl
import re
import base64
import subprocess
import os
import gzip
import random
from bson import json_util
from py_mini_racer import py_mini_racer
import utils.redis_utils
import proxy_modules.utils
def handle_connection_close_header(request_breakdown, json_config):
    """Force 'Connection: close' on the request unless the config opts out.

    Returns a deep copy of ``request_breakdown`` with the connection header
    forced to "close", or the original object untouched when the config sets
    ``do_not_modify_connection_header`` to anything other than False.
    """
    try:
        keep_untouched = json_config['do_not_modify_connection_header']
    except:
        keep_untouched = False
    if keep_untouched is not False:
        # Explicitly configured: leave the request exactly as received.
        return request_breakdown
    # Default behaviour: force connection close on the outgoing request.
    result = copy.deepcopy(request_breakdown)
    result['connection'] = {"present": True, "value_when_present": "close"}
    return result
def handle_request_headers(request_breakdown, json_config):
    """Optionally rewrite the request headers through a user-supplied JS function.

    The configuration may carry, under the ``headers`` key, escaped JavaScript
    source containing the placeholder ``DATA_ATTRIBUTE_INPUT``.  The current
    header list is injected into that placeholder and the script is evaluated
    in a MiniRacer sandbox; its JSON output becomes the new header list.
    On any failure the request is returned unchanged.
    """
    try:
        js_source = json_config['headers']
    except:
        js_source = None
    if js_source is None:
        return request_breakdown
    # Undo the double escaping applied when the function was stored.
    decoded = js_source.encode('latin1').decode('unicode-escape').encode('latin1').decode('utf-8')
    script = decoded.replace("DATA_ATTRIBUTE_INPUT", json.dumps(request_breakdown['headers']))
    ctx = py_mini_racer.MiniRacer()
    try:
        raw = json.loads(ctx.eval(script))
        rewritten = [(item[0], item[1]) for item in raw]
    except Exception:
        rewritten = None
    del ctx
    if rewritten is None:
        # Script failed or produced malformed output: keep original headers.
        return request_breakdown
    updated = copy.deepcopy(request_breakdown)
    updated['headers'] = copy.deepcopy(rewritten)
    return updated
def _gen_key_freeze(node, config_id):
    """Derive the redis key pair (data key, counter key) for a freeze rule.

    The key token is built from the rule's host regex plus its header names
    (case-insensitive, order-independent thanks to sorting) and
    base64-encoded so it is safe to embed in a redis key path.
    """
    names = sorted(str(h).lower() for h in node['headers'])
    raw = node['host_regex'].lower() + '-' + '-'.join(names)
    token = base64.b64encode(raw.encode('utf-8')).decode('ascii')
    suffix = '/config_id=' + str(config_id) + '/key=' + token
    return 'type=headers_freeze_data' + suffix, 'type=headers_freeze_counter' + suffix
def handle_request_headers_freeze(request_breakdown, json_config, config_id):
    """Freeze ("pin") selected request headers across successive requests.

    For every ``headers_freeze`` rule whose ``host_regex`` matches the request
    host, the named headers are captured into redis on first sight and
    re-injected into later matching requests, until the rule's
    ``max_requests`` budget is exhausted (values < 0 disable the limit).

    Returns ``(request_breakdown, log_message)``; the breakdown is a deep
    copy whenever any rule could apply.
    NOTE(review): the counter is incremented on every cache hit but the
    budget check uses the value read *before* the increment — confirm this
    off-by-one is intended.
    """
    # Init the context
    if not 'headers_freeze' in json_config: return request_breakdown, 'No actions taken'
    if request_breakdown['host'] is None: return request_breakdown, 'No actions taken'
    redis_client = utils.redis_utils.create_redis_client()
    request_breakdown_res = copy.deepcopy(request_breakdown)
    # Run the freeze
    return_msg_logs = []
    idx = -1
    for node in json_config['headers_freeze']:
        idx = idx + 1
        if re.search(node['host_regex'], request_breakdown_res['host']):
            # Check cached data
            key_data, key_counter = _gen_key_freeze(node, config_id)
            stored_val = redis_client.get(key_data)
            stored_counter = redis_client.get(key_counter)
            stored_counter = 0 if stored_counter is None else int(stored_counter)
            # Will cache if relevant: only store when *every* listed header
            # is present and non-empty, so partial snapshots are never frozen.
            if stored_val is None:
                will_cache = True
                data_to_freeze = []
                for header_i in node['headers']:
                    val_header = proxy_modules.utils.get_header_from_xploded_req(request_breakdown_res, header_i)
                    if val_header is None:
                        will_cache = False
                    elif len(val_header) == 0:
                        will_cache = False
                    else:
                        data_to_freeze.append([header_i, val_header])
                if will_cache is True:
                    redis_client.set(key_data, json_util.dumps(data_to_freeze))
                    redis_client.set(key_counter, 0)
                    return_msg_logs.append('Node idx ' + str(idx) + ': Stored values as headers present and no cached values')
                else:
                    return_msg_logs.append('Node idx ' + str(idx) + ': No actions taken ! No cached values and no headers present')
            # Will use cached headers
            else:
                redis_client.incr(key_counter)
                data_to_get = json_util.loads(stored_val)
                request_breakdown_res = proxy_modules.utils.merge_headers(request_breakdown_res, data_to_get)
                # Respect max requests ! Once the budget is reached the cached
                # snapshot is dropped so a fresh one can be captured.
                if ((node['max_requests'] >= 0) and (stored_counter+1 >= node['max_requests'])):
                    redis_client.delete(key_data)
                    redis_client.delete(key_counter)
                    return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data and delete it as it reached maximum used times.')
                else:
                    return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data !')
    return request_breakdown_res, '. '.join(return_msg_logs)
def handle_ssl_context(json_config):
    """Build an ``ssl.SSLContext`` according to the proxy configuration.

    Reads the optional ``ssl`` section of the config (protocol version name,
    certificate-verification flag, cipher list, signature algorithms) and
    returns ``(context, settings)`` where ``settings`` echoes the effective
    values for logging/diagnostics.  Every lookup falls back to a safe
    default on any error (missing section, None config, wrong types).
    Signature algorithms are applied through flexssl (not used for HTTP/2).
    """
    try:
        version_ssl = json_config['ssl']['version']
    except:
        version_ssl = "PROTOCOL_TLS"
    try:
        verify_ssl = json_config['ssl']['verify_ssl']
    except:
        verify_ssl = False
    try:
        ciphers_ssl = json_config['ssl']['ciphers']
        if (ciphers_ssl == ""): ciphers_ssl = None
    except:
        ciphers_ssl = None
    try:
        signatures_ssl = json_config['ssl']['signatures']
        if (signatures_ssl == ""): signatures_ssl = None
    except:
        signatures_ssl = None
    # Map the configured protocol name to the ssl constant; anything
    # unrecognised falls back to the auto-negotiating PROTOCOL_TLS.
    version_map = {
        "PROTOCOL_TLS": ssl.PROTOCOL_TLS,
        "PROTOCOL_TLSv1": ssl.PROTOCOL_TLSv1,
        "PROTOCOL_TLSv1_1": ssl.PROTOCOL_TLSv1_1,
        "PROTOCOL_TLSv1_2": ssl.PROTOCOL_TLSv1_2,
    }
    res_context = ssl.SSLContext(version_map.get(version_ssl, ssl.PROTOCOL_TLS))
    res_context.verify_mode = ssl.CERT_REQUIRED if verify_ssl is True else ssl.CERT_NONE
    if ciphers_ssl is not None:
        res_context.set_ciphers(ciphers_ssl)
    if signatures_ssl is not None:
        # flexssl patches the OpenSSL signature-algorithm list in place.
        flexssl.set_sigalgs(signatures_ssl, res_context)
    return res_context, {
        "version_ssl": version_ssl,
        "verify_ssl": verify_ssl,
        "ciphers_ssl": ciphers_ssl,
        "signatures_ssl": signatures_ssl
    }
def handle_http2_request(request_breakdown, json_config):
    """Decide whether the request must be replayed over HTTP/2.

    Returns ``(use_http2, params)``; ``params`` carries everything the
    HTTP/2 client needs (headers, method, path, target URL, body) and is
    empty whenever HTTP/2 is not forced.  Only the explicit ``"YES"``
    setting triggers HTTP/2; ``"NO"``, ``"TRY"`` and anything unknown
    keep the plain HTTP path.
    """
    try:
        mode = json_config['http2']
    except:
        mode = "NO"
    if mode not in ('NO', 'YES', 'TRY'):
        mode = "NO"
    if mode != "YES":
        return False, {}
    target_host = request_breakdown['host']
    if target_host is None:
        # Cannot build an absolute URL without a host.
        return False, {}
    return True, {
        "headers": request_breakdown['headers'],
        "command": request_breakdown['request_uri']['__DETAIL_command'],
        "path": request_breakdown['request_uri']['__DETAIL_path'],
        "host_connect": "https://" + target_host,
        "data": request_breakdown['data'],
    }
def _delete_pass_file(file_name):
    """Best-effort removal of a temporary file; missing files are ignored.

    The previous bare ``except`` also swallowed KeyboardInterrupt and
    SystemExit; only OS-level failures (file absent, permissions) are
    expected here, so catch OSError specifically.
    """
    try:
        os.remove(file_name)
    except OSError:
        # The file may never have been created, or was already cleaned up.
        pass
def proxy_js_function(js_function_code):
    """Run a user-supplied Node.js snippet that selects the upstream proxy.

    The snippet is written to a temp file and executed with ``node``; its
    stdout must be JSON with at least ``host``/``port``/``user``/``password``
    keys (optionally ``type`` and ``data``).

    Returns a 4-tuple ``(addr, auth, type, messages)`` where ``addr`` is
    ``(host, port)`` or None (no proxy / failure), ``auth`` is
    ``(user, password)`` or None, and ``messages`` collects stdout, stderr,
    any execution error and the carried data for diagnostics.
    """
    # Create the JS file
    file_tmp_to_run = "/tmp/js_proxy_node_" + str(random.randint(0,1000000000000000)) + '.js'
    _delete_pass_file(file_tmp_to_run)
    file_js_tmp = open(file_tmp_to_run,'w')
    file_js_tmp.write(js_function_code)
    file_js_tmp.close()
    # Run the JS and clean
    node_js_stdout = b""
    node_js_stderr = b""
    node_js_exec_err = ''
    data_carried = None
    try:
        node_running_process = subprocess.run(["node", file_tmp_to_run], capture_output=True)
        node_js_stdout = node_running_process.stdout
        node_js_stderr = node_running_process.stderr
        end_res = node_running_process.stdout
        # Formatting
        end_res = json.loads(end_res)
        proxy_host = end_res['host']
        if proxy_host is None:
            proxy_port = 0
        else:
            proxy_port = int(end_res['port'])
        if (not "type" in end_res): #HTTP by default not to break the existing config
            type=None
        else:
            type=end_res["type"]  # NOTE(review): shadows the builtin `type` within this function
        proxy_user = end_res['user']
        proxy_password = end_res['password']
        try:
            data_carried = end_res['data']
        except:
            pass
    except Exception as e:
        # Any failure (node missing, bad JSON, missing keys) disables the proxy.
        node_js_exec_err = str(e)
        end_res = None
    json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err, "data_carried" : data_carried}
    _delete_pass_file(file_tmp_to_run)
    # Return
    if end_res is None: return None, None, None,json_messages_js_run
    if proxy_host is None:
        json_messages_js_run['note'] = 'host is set to null'
        return None, None, None, json_messages_js_run
    else:
        if (proxy_user is None) and (proxy_password is None):
            json_messages_js_run['note'] = 'no authentication needed'
            return (proxy_host, proxy_port), None, type,json_messages_js_run
        else:
            return (proxy_host, proxy_port), (proxy_user, proxy_password), type,json_messages_js_run
def run_js_function_io(reply, proxy_verbose_messages, js_function_to_run, return_type = 'BINARY'):
    """Run a user-supplied Node.js check against an upstream reply.

    The reply (status code, header block, decompressed body when possible)
    plus the proxy diagnostics are serialised to a JSON file which is passed
    to the script as argv[1].

    With return_type 'BINARY' the script must print 0 (not blacklisted) or
    1 (blacklisted); returns ``(bool, messages)``.  With return_type None
    only the stdout/stderr/execerr messages dict is returned.
    """
    # return_type can be: 'BINARY' or None
    # Create the JS file
    random_pick = str(random.randint(0,1000000000000000))
    file_tmp_to_run = "/tmp/js_function_io_" + random_pick + '.js'
    _delete_pass_file(file_tmp_to_run)
    file_js_tmp = open(file_tmp_to_run,'w')
    file_js_tmp.write(js_function_to_run)
    file_js_tmp.close()
    # Build the Payload
    header, data = proxy_modules.utils.separate_header_and_body(reply)
    payload = {}
    payload['reply'] = {}
    try:
        payload['reply']["code"] = proxy_modules.utils.fetch_response_code(reply)
    except:
        payload['reply']["code"] = None
    try:
        payload['reply']['header'] = header.decode('utf-8')
    except:
        payload['reply']['header'] = None
    try:
        # Body may be gzip-compressed; fall back to plain UTF-8, then None.
        payload['reply']['data'] = gzip.decompress(data).decode('utf-8')
    except:
        try:
            payload['reply']['data'] = data.decode('utf-8')
        except:
            payload['reply']['data'] = None
    payload['proxy'] = proxy_verbose_messages
    file_tmp_to_load = "/tmp/js_function_io_" + random_pick + '.json'
    _delete_pass_file(file_tmp_to_load)
    file_js_tmp = open(file_tmp_to_load,'w')
    file_js_tmp.write(json.dumps(payload))
    file_js_tmp.close()
    # Run the JS and clean
    node_js_stdout = b""
    node_js_stderr = b""
    node_js_exec_err = ''
    try:
        node_running_process = subprocess.run(["node", file_tmp_to_run, file_tmp_to_load], capture_output=True)
        node_js_stdout = node_running_process.stdout
        node_js_stderr = node_running_process.stderr
    except Exception as e:
        node_js_exec_err = str(e)
    _delete_pass_file(file_tmp_to_run)
    _delete_pass_file(file_tmp_to_load)
    # Wrap up !
    if return_type == 'BINARY':
        if node_js_exec_err == '':
            try:
                end_res = int(node_running_process.stdout.decode('utf-8'))
            except Exception as e:
                node_js_exec_err = str(e)
                end_res = 0
        else:
            end_res = 0
        if not end_res in [0, 1]:
            end_res = 0
            node_js_exec_err = "Output should be 0 (not blacklisted) or 1 (blacklisted)"
        json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err}
        # `res` is the tuple (blacklisted_bool, messages): the conditional
        # expression binds tighter than the comma.
        res = True if end_res == 1 else False, json_messages_js_run
        return res
    if return_type is None:
        return {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err}
| 34.384409 | 172 | 0.619029 | import copy
import json
import ssl
import flexssl
import re
import base64
import subprocess
import os
import gzip
import random
from bson import json_util
from py_mini_racer import py_mini_racer
import utils.redis_utils
import proxy_modules.utils
def handle_connection_close_header(request_breakdown, json_config):
    """Force 'Connection: close' on the request unless the config opts out.

    Returns a deep copy of ``request_breakdown`` with the connection header
    forced to "close", or the original object untouched when the config sets
    ``do_not_modify_connection_header`` to anything other than False.
    """
    try:
        keep_untouched = json_config['do_not_modify_connection_header']
    except:
        keep_untouched = False
    if keep_untouched is not False:
        # Explicitly configured: leave the request exactly as received.
        return request_breakdown
    # Default behaviour: force connection close on the outgoing request.
    result = copy.deepcopy(request_breakdown)
    result['connection'] = {"present": True, "value_when_present": "close"}
    return result
def handle_request_headers(request_breakdown, json_config):
    """Optionally rewrite the request headers through a user-supplied JS function.

    The configuration may carry, under the ``headers`` key, escaped JavaScript
    source containing the placeholder ``DATA_ATTRIBUTE_INPUT``.  The current
    header list is injected into that placeholder and the script is evaluated
    in a MiniRacer sandbox; its JSON output becomes the new header list.
    On any failure the request is returned unchanged.
    """
    try:
        js_source = json_config['headers']
    except:
        js_source = None
    if js_source is None:
        return request_breakdown
    # Undo the double escaping applied when the function was stored.
    decoded = js_source.encode('latin1').decode('unicode-escape').encode('latin1').decode('utf-8')
    script = decoded.replace("DATA_ATTRIBUTE_INPUT", json.dumps(request_breakdown['headers']))
    ctx = py_mini_racer.MiniRacer()
    try:
        raw = json.loads(ctx.eval(script))
        rewritten = [(item[0], item[1]) for item in raw]
    except Exception:
        rewritten = None
    del ctx
    if rewritten is None:
        # Script failed or produced malformed output: keep original headers.
        return request_breakdown
    updated = copy.deepcopy(request_breakdown)
    updated['headers'] = copy.deepcopy(rewritten)
    return updated
def _gen_key_freeze(node, config_id):
    """Derive the redis key pair (data key, counter key) for a freeze rule.

    The key token is built from the rule's host regex plus its header names
    (case-insensitive, order-independent thanks to sorting) and
    base64-encoded so it is safe to embed in a redis key path.
    """
    names = sorted(str(h).lower() for h in node['headers'])
    raw = node['host_regex'].lower() + '-' + '-'.join(names)
    token = base64.b64encode(raw.encode('utf-8')).decode('ascii')
    suffix = '/config_id=' + str(config_id) + '/key=' + token
    return 'type=headers_freeze_data' + suffix, 'type=headers_freeze_counter' + suffix
def handle_request_headers_freeze(request_breakdown, json_config, config_id):
    """Freeze ("pin") selected request headers across successive requests.

    For every ``headers_freeze`` rule whose ``host_regex`` matches the request
    host, the named headers are captured into redis on first sight and
    re-injected into later matching requests, until the rule's
    ``max_requests`` budget is exhausted (values < 0 disable the limit).

    Returns ``(request_breakdown, log_message)``; the breakdown is a deep
    copy whenever any rule could apply.
    NOTE(review): the counter is incremented on every cache hit but the
    budget check uses the value read *before* the increment — confirm this
    off-by-one is intended.
    """
    if not 'headers_freeze' in json_config: return request_breakdown, 'No actions taken'
    if request_breakdown['host'] is None: return request_breakdown, 'No actions taken'
    redis_client = utils.redis_utils.create_redis_client()
    request_breakdown_res = copy.deepcopy(request_breakdown)
    return_msg_logs = []
    idx = -1
    for node in json_config['headers_freeze']:
        idx = idx + 1
        if re.search(node['host_regex'], request_breakdown_res['host']):
            # Look up a previously frozen snapshot for this rule.
            key_data, key_counter = _gen_key_freeze(node, config_id)
            stored_val = redis_client.get(key_data)
            stored_counter = redis_client.get(key_counter)
            stored_counter = 0 if stored_counter is None else int(stored_counter)
            # No snapshot yet: only store when *every* listed header is
            # present and non-empty, so partial snapshots are never frozen.
            if stored_val is None:
                will_cache = True
                data_to_freeze = []
                for header_i in node['headers']:
                    val_header = proxy_modules.utils.get_header_from_xploded_req(request_breakdown_res, header_i)
                    if val_header is None:
                        will_cache = False
                    elif len(val_header) == 0:
                        will_cache = False
                    else:
                        data_to_freeze.append([header_i, val_header])
                if will_cache is True:
                    redis_client.set(key_data, json_util.dumps(data_to_freeze))
                    redis_client.set(key_counter, 0)
                    return_msg_logs.append('Node idx ' + str(idx) + ': Stored values as headers present and no cached values')
                else:
                    return_msg_logs.append('Node idx ' + str(idx) + ': No actions taken ! No cached values and no headers present')
            # Snapshot available: merge the frozen headers into the request.
            else:
                redis_client.incr(key_counter)
                data_to_get = json_util.loads(stored_val)
                request_breakdown_res = proxy_modules.utils.merge_headers(request_breakdown_res, data_to_get)
                # Once the budget is reached, drop the snapshot so a fresh
                # one can be captured on the next matching request.
                if ((node['max_requests'] >= 0) and (stored_counter+1 >= node['max_requests'])):
                    redis_client.delete(key_data)
                    redis_client.delete(key_counter)
                    return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data and delete it as it reached maximum used times.')
                else:
                    return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data !')
    return request_breakdown_res, '. '.join(return_msg_logs)
def handle_ssl_context(json_config):
    """Build an ``ssl.SSLContext`` according to the proxy configuration.

    Reads the optional ``ssl`` section of the config (protocol version name,
    certificate-verification flag, cipher list, signature algorithms) and
    returns ``(context, settings)`` where ``settings`` echoes the effective
    values for logging/diagnostics.  Every lookup falls back to a safe
    default on any error (missing section, None config, wrong types).
    Signature algorithms are applied through flexssl (not used for HTTP/2).
    """
    try:
        version_ssl = json_config['ssl']['version']
    except:
        version_ssl = "PROTOCOL_TLS"
    try:
        verify_ssl = json_config['ssl']['verify_ssl']
    except:
        verify_ssl = False
    try:
        ciphers_ssl = json_config['ssl']['ciphers']
        if (ciphers_ssl == ""): ciphers_ssl = None
    except:
        ciphers_ssl = None
    try:
        signatures_ssl = json_config['ssl']['signatures']
        if (signatures_ssl == ""): signatures_ssl = None
    except:
        signatures_ssl = None
    # Map the configured protocol name to the ssl constant; anything
    # unrecognised falls back to the auto-negotiating PROTOCOL_TLS.
    version_map = {
        "PROTOCOL_TLS": ssl.PROTOCOL_TLS,
        "PROTOCOL_TLSv1": ssl.PROTOCOL_TLSv1,
        "PROTOCOL_TLSv1_1": ssl.PROTOCOL_TLSv1_1,
        "PROTOCOL_TLSv1_2": ssl.PROTOCOL_TLSv1_2,
    }
    res_context = ssl.SSLContext(version_map.get(version_ssl, ssl.PROTOCOL_TLS))
    res_context.verify_mode = ssl.CERT_REQUIRED if verify_ssl is True else ssl.CERT_NONE
    if ciphers_ssl is not None:
        res_context.set_ciphers(ciphers_ssl)
    if signatures_ssl is not None:
        # flexssl patches the OpenSSL signature-algorithm list in place.
        flexssl.set_sigalgs(signatures_ssl, res_context)
    return res_context, {
        "version_ssl": version_ssl,
        "verify_ssl": verify_ssl,
        "ciphers_ssl": ciphers_ssl,
        "signatures_ssl": signatures_ssl
    }
def handle_http2_request(request_breakdown, json_config):
    """Decide whether the request must be replayed over HTTP/2.

    Returns ``(use_http2, params)``; ``params`` carries everything the
    HTTP/2 client needs (headers, method, path, target URL, body) and is
    empty whenever HTTP/2 is not forced.  Only the explicit ``"YES"``
    setting triggers HTTP/2; ``"NO"``, ``"TRY"`` and anything unknown
    keep the plain HTTP path.
    """
    try:
        mode = json_config['http2']
    except:
        mode = "NO"
    if mode not in ('NO', 'YES', 'TRY'):
        mode = "NO"
    if mode != "YES":
        return False, {}
    target_host = request_breakdown['host']
    if target_host is None:
        # Cannot build an absolute URL without a host.
        return False, {}
    return True, {
        "headers": request_breakdown['headers'],
        "command": request_breakdown['request_uri']['__DETAIL_command'],
        "path": request_breakdown['request_uri']['__DETAIL_path'],
        "host_connect": "https://" + target_host,
        "data": request_breakdown['data'],
    }
def _delete_pass_file(file_name):
    """Best-effort removal of a temporary file; missing files are ignored.

    The previous bare ``except`` also swallowed KeyboardInterrupt and
    SystemExit; only OS-level failures (file absent, permissions) are
    expected here, so catch OSError specifically.
    """
    try:
        os.remove(file_name)
    except OSError:
        # The file may never have been created, or was already cleaned up.
        pass
def proxy_js_function(js_function_code):
    """Run a user-supplied Node.js snippet that selects the upstream proxy.

    The snippet is written to a temp file and executed with ``node``; its
    stdout must be JSON with at least ``host``/``port``/``user``/``password``
    keys (optionally ``type`` and ``data``).

    Returns a 4-tuple ``(addr, auth, type, messages)`` where ``addr`` is
    ``(host, port)`` or None (no proxy / failure), ``auth`` is
    ``(user, password)`` or None, and ``messages`` collects stdout, stderr,
    any execution error and the carried data for diagnostics.
    """
    # Write the snippet to a unique temp file (clearing any leftover first).
    file_tmp_to_run = "/tmp/js_proxy_node_" + str(random.randint(0,1000000000000000)) + '.js'
    _delete_pass_file(file_tmp_to_run)
    file_js_tmp = open(file_tmp_to_run,'w')
    file_js_tmp.write(js_function_code)
    file_js_tmp.close()
    # Execute with node and parse its JSON stdout.
    node_js_stdout = b""
    node_js_stderr = b""
    node_js_exec_err = ''
    data_carried = None
    try:
        node_running_process = subprocess.run(["node", file_tmp_to_run], capture_output=True)
        node_js_stdout = node_running_process.stdout
        node_js_stderr = node_running_process.stderr
        end_res = node_running_process.stdout
        end_res = json.loads(end_res)
        proxy_host = end_res['host']
        if proxy_host is None:
            proxy_port = 0
        else:
            proxy_port = int(end_res['port'])
        if (not "type" in end_res):  # HTTP by default, not to break existing configs
            type=None
        else:
            type=end_res["type"]  # NOTE(review): shadows the builtin `type` within this function
        proxy_user = end_res['user']
        proxy_password = end_res['password']
        try:
            data_carried = end_res['data']
        except:
            pass
    except Exception as e:
        # Any failure (node missing, bad JSON, missing keys) disables the proxy.
        node_js_exec_err = str(e)
        end_res = None
    json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err, "data_carried" : data_carried}
    _delete_pass_file(file_tmp_to_run)
    # Shape the return tuple according to what the script produced.
    if end_res is None: return None, None, None,json_messages_js_run
    if proxy_host is None:
        json_messages_js_run['note'] = 'host is set to null'
        return None, None, None, json_messages_js_run
    else:
        if (proxy_user is None) and (proxy_password is None):
            json_messages_js_run['note'] = 'no authentication needed'
            return (proxy_host, proxy_port), None, type,json_messages_js_run
        else:
            return (proxy_host, proxy_port), (proxy_user, proxy_password), type,json_messages_js_run
def run_js_function_io(reply, proxy_verbose_messages, js_function_to_run, return_type = 'BINARY'):
    """Run a user-supplied Node.js check against an upstream reply.

    The reply (status code, header block, decompressed body when possible)
    plus the proxy diagnostics are serialised to a JSON file which is passed
    to the script as argv[1].

    With return_type 'BINARY' the script must print 0 (not blacklisted) or
    1 (blacklisted); returns ``(bool, messages)``.  With return_type None
    only the stdout/stderr/execerr messages dict is returned.
    """
    # Write the script to a unique temp file.
    random_pick = str(random.randint(0,1000000000000000))
    file_tmp_to_run = "/tmp/js_function_io_" + random_pick + '.js'
    _delete_pass_file(file_tmp_to_run)
    file_js_tmp = open(file_tmp_to_run,'w')
    file_js_tmp.write(js_function_to_run)
    file_js_tmp.close()
    # Build the JSON payload describing the reply.
    header, data = proxy_modules.utils.separate_header_and_body(reply)
    payload = {}
    payload['reply'] = {}
    try:
        payload['reply']["code"] = proxy_modules.utils.fetch_response_code(reply)
    except:
        payload['reply']["code"] = None
    try:
        payload['reply']['header'] = header.decode('utf-8')
    except:
        payload['reply']['header'] = None
    try:
        # Body may be gzip-compressed; fall back to plain UTF-8, then None.
        payload['reply']['data'] = gzip.decompress(data).decode('utf-8')
    except:
        try:
            payload['reply']['data'] = data.decode('utf-8')
        except:
            payload['reply']['data'] = None
    payload['proxy'] = proxy_verbose_messages
    file_tmp_to_load = "/tmp/js_function_io_" + random_pick + '.json'
    _delete_pass_file(file_tmp_to_load)
    file_js_tmp = open(file_tmp_to_load,'w')
    file_js_tmp.write(json.dumps(payload))
    file_js_tmp.close()
    # Execute with node, then remove both temp files.
    node_js_stdout = b""
    node_js_stderr = b""
    node_js_exec_err = ''
    try:
        node_running_process = subprocess.run(["node", file_tmp_to_run, file_tmp_to_load], capture_output=True)
        node_js_stdout = node_running_process.stdout
        node_js_stderr = node_running_process.stderr
    except Exception as e:
        node_js_exec_err = str(e)
    _delete_pass_file(file_tmp_to_run)
    _delete_pass_file(file_tmp_to_load)
    # Interpret the script's verdict.
    if return_type == 'BINARY':
        if node_js_exec_err == '':
            try:
                end_res = int(node_running_process.stdout.decode('utf-8'))
            except Exception as e:
                node_js_exec_err = str(e)
                end_res = 0
        else:
            end_res = 0
        if not end_res in [0, 1]:
            end_res = 0
            node_js_exec_err = "Output should be 0 (not blacklisted) or 1 (blacklisted)"
        json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err}
        # `res` is the tuple (blacklisted_bool, messages): the conditional
        # expression binds tighter than the comma.
        res = True if end_res == 1 else False, json_messages_js_run
        return res
    if return_type is None:
        return {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err}
| true | true |
f732fd11bb89a0d743915922aa15867ea03dfdb3 | 10,654 | py | Python | logging_plus/__init__.py | signag/py-logging-plus | dee3c63a36033183628d1941eb08e4dd0d613866 | [
"MIT"
] | null | null | null | logging_plus/__init__.py | signag/py-logging-plus | dee3c63a36033183628d1941eb08e4dd0d613866 | [
"MIT"
] | null | null | null | logging_plus/__init__.py | signag/py-logging-plus | dee3c63a36033183628d1941eb08e4dd0d613866 | [
"MIT"
] | null | null | null | #MIT License
#
#Copyright (c) 2020 signag
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
`logging_plus` - Add-on to Python logging
This module extends the standard Python logging module for the following aspects:
- Subclassing Logger allows customization of logging messages depending on context
- This is used for automatic indentation depending on call stack level
- The module provides also the capability to generically log function entry and exit
"""
import logging
import inspect
import sys
import atexit
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#This parameter controls whether or not logging inside infrastructure modules is activated.
#The following modules are affected: inspect, logging, logging-plus
#Set to False to also trace those modules (very verbose).
#
noInfrastructureLogging = True
class Manager(logging.Manager):
    """
    logging.Manager subclass that hands out the extended Logger class
    instead of the standard one, and can strip file handlers at shutdown.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the given root logger node.
        """
        super().__init__(rootnode)

    def getLogger(self, name):
        """
        Return the subclassed Logger rather than the standard Logger.

        Mirrors logging.Manager.getLogger: placeholders created earlier for
        dotted descendants are replaced and re-linked once the real logger
        is instantiated.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        logging._acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, logging.PlaceHolder):
                    # A child logger was created first; swap the placeholder
                    # for a real Logger and fix up parent/child links.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            logging._releaseLock()
        return rv

    def cleanupLoggers(self):
        """
        Remove registered file handlers from the root logger and from all
        named loggers (used at interpreter shutdown).
        """
        lgr = root
        for hdl in reversed(lgr.handlers):
            if isinstance(hdl, logging.FileHandler):
                lgr.removeHandler(hdl)
        for lgName in self.loggerDict:
            lgr = self.getLogger(lgName)
            # Iterate over a copy: removeHandler mutates lgr.handlers, and
            # mutating the list while iterating it would skip handlers.
            for hdl in list(lgr.handlers):
                if isinstance(hdl, logging.FileHandler):
                    lgr.removeHandler(hdl)
class Logger(logging.Logger):
    """
    logging.Logger subclass that prefixes every message with indentation
    proportional to the caller's call-stack depth, plus explicit and
    automatic function entry/exit helpers.
    """
    def __init__(self, name, level=logging.NOTSET):
        """
        Initialize the subclassed Logger
        """
        super().__init__(name, level)

    def _indent(self, drop):
        """
        Return the indentation prefix for the caller's call-stack depth.

        drop -- number of frames to discount (this helper's own frame, the
        public logging method, and any extra wrapper frames), so the prefix
        reflects the *user* code's depth.  Clamped at zero, matching the
        original behaviour where a negative count produced an empty string.
        """
        return " " * max(len(inspect.stack()) - drop, 0)

    def debug(self, msg, *args, **kwargs):
        """
        Indent DEBUG message according to call stack level before logging
        """
        # drop=2: this frame plus the helper frame -> the depth the caller sees.
        super().debug(self._indent(2) + msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Indent INFO message according to call stack level before logging
        """
        super().info(self._indent(2) + msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Indent WARNING message according to call stack level before logging
        """
        super().warning(self._indent(2) + msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Indent ERROR message according to call stack level before logging
        """
        super().error(self._indent(2) + msg, *args, **kwargs)

    def logEntry(self, msg, *args, **kwargs):
        """
        Log function entry with DEBUG severity
        """
        # drop=3: one level shallower than debug(), matching the original
        # "len(stack) - 2" indentation for explicit entry/exit markers.
        super().debug(self._indent(3) + ">>> Entry " + msg, *args, **kwargs)

    def autoLogEntry(self, msg, *args, **kwargs):
        """
        Log function entry with DEBUG severity in case of automatic logging
        """
        # drop=4: additionally discounts the trace-callback frame.
        super().debug(self._indent(4) + ">>> Entry " + msg, *args, **kwargs)

    def logExit(self, msg, *args, **kwargs):
        """
        Log function exit with DEBUG severity
        """
        super().debug(self._indent(3) + "<<< Exit " + msg, *args, **kwargs)

    def autoLogExit(self, msg, *args, **kwargs):
        """
        Log function exit with DEBUG severity in case of automatic logging
        """
        super().debug(self._indent(4) + "<<< Exit " + msg, *args, **kwargs)
class RootLogger(Logger):
    """
    The root of the extended logger hierarchy.

    Behaves exactly like the extended Logger but is pinned to the
    name "root".
    """
    def __init__(self, level):
        """Initialize the root logger at the given *level*."""
        super().__init__("root", level)
# Module-level wiring, mirroring what the stdlib logging module does:
# the default logger class, the shared root logger instance, and a
# Manager that hands out our Logger subclass.
_loggerClass = Logger
root = RootLogger(logging.WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
def getLogger(name=None):
    """
    Return an extended logger with the specified name, creating it if
    necessary.  With no name (or any falsy one, or the literal root
    name) the root logger is returned.
    """
    wants_root = (not name) or (isinstance(name, str) and name == root.name)
    return root if wants_root else Logger.manager.getLogger(name)
def excludeFromLogging(frame):
    """
    Check whether *frame* should be excluded from automatic logging.

    A frame is excluded when it, or any frame further up its call chain,
    belongs to the logging infrastructure itself (the ``inspect`` or
    ``logging`` modules, or this module).  The original implementation
    recursed once per outer frame; this walks the ``f_back`` chain
    iteratively, avoiding recursion overhead and any risk of hitting the
    recursion limit on deep stacks inside the tracing hot path.
    """
    infra = ("inspect", "logging", __name__)
    while frame:
        module = inspect.getmodule(frame)
        if not module:
            # Frame without a resolvable module (e.g. exec'd code): keep logging.
            return False
        if module.__name__ in infra:
            return True
        frame = frame.f_back
    return False
def autoLogIgnore(frame, event, arg):
    """
    Trace function for scopes where auto-logging is deactivated.

    Installed instead of autoLogEntryExit; it logs nothing and merely
    keeps itself registered for nested 'call' events so the whole
    subtree stays silent.
    """
    return autoLogIgnore if event == 'call' else None
def autoLogEntryExit(frame, event, arg):
    """
    Trace function (for sys.settrace) that logs scope entry and exit.

    On 'call' it logs the entered function and returns itself so tracing
    continues inside the new scope; on 'return' it logs the function name
    and the value being returned (carried in *arg*).  Frames belonging to
    the logging infrastructure are handed autoLogIgnore instead, which
    silences tracing for that whole subtree.
    """
    if (event == 'call') or (event == 'return'):
        #Only call and return events are sensed
        if not frame:
            return autoLogIgnore
        code_obj = frame.f_code
        func_name = code_obj.co_name
        file_name = code_obj.co_filename
        file_line = code_obj.co_firstlineno
        module = inspect.getmodule(frame)
        if not module:
            # No resolvable module (e.g. exec'd code): silence this subtree.
            return autoLogIgnore
        moduleName = module.__name__
        if event == 'call':
            #System has been entering a new scope.
            if noInfrastructureLogging:
                if excludeFromLogging(frame):
                    return autoLogIgnore
            getLogger(moduleName).autoLogEntry('%s (%s - line %s - module %s)', func_name, file_name, file_line, moduleName)
            #The function returns a reference to itself, in order to register itself as trace function for the new scope
            return autoLogEntryExit
        elif event == 'return':
            #System is about to exit a scope (function or other code block). arg is the value being returned.
            getLogger(moduleName).autoLogExit ('%s : Return value: %s', func_name, arg)
def removeFileHandlers():
    """Strip every FileHandler so late logging cannot hit a closed stream.

    The Python shutdown sequence closes open file handlers *before*
    garbage collection runs, so a ``__del__`` method that logs through a
    FileHandler during teardown would raise.  Registered via atexit, this
    removes FileHandlers from the root logger and every named logger
    first.  (Automatic entry/exit logging has already been switched off
    at that point by unregisterAutoLogEntryExit.)
    """
    getLogger().manager.cleanupLoggers()
def registerAutoLogEntryExit():
    """
    Register autoLogEntryExit as system trace function
    This will issue logging whenever a function scope is entered or exited
    """
    # NOTE: sys.settrace only affects the current thread; threads started
    # afterwards must install the hook themselves to be traced.
    sys.settrace(autoLogEntryExit)
def unregisterAutoLogEntryExit():
    """
    Remove the global trace function, stopping automatic function
    entry/exit logging for the current thread.
    """
    sys.settrace(None)
#Register unregisterAutoLogEntryExit to avoid logging exceptions during module shutdown.
#atexit runs callbacks in reverse registration order, so tracing is stopped
#before the file handlers are stripped here.
atexit.register(removeFileHandlers)
atexit.register(unregisterAutoLogEntryExit) | 33.822222 | 124 | 0.620237 |
import logging
import inspect
import sys
import atexit
noInfrastructureLogging = True  # when False, frames inside inspect/logging/this module are traced and logged too
class Manager(logging.Manager):
    """
    logging.Manager subclass that hands out the extended Logger class
    instead of the standard one, and can strip file handlers at shutdown.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the given root logger node.
        """
        super().__init__(rootnode)

    def getLogger(self, name):
        """
        Return the subclassed Logger rather than the standard Logger.

        Mirrors logging.Manager.getLogger: placeholders created earlier for
        dotted descendants are replaced and re-linked once the real logger
        is instantiated.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        logging._acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, logging.PlaceHolder):
                    # A child logger was created first; swap the placeholder
                    # for a real Logger and fix up parent/child links.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            logging._releaseLock()
        return rv

    def cleanupLoggers(self):
        """
        Remove registered file handlers from the root logger and from all
        named loggers (used at interpreter shutdown).
        """
        lgr = root
        for hdl in reversed(lgr.handlers):
            if isinstance(hdl, logging.FileHandler):
                lgr.removeHandler(hdl)
        for lgName in self.loggerDict:
            lgr = self.getLogger(lgName)
            # Iterate over a copy: removeHandler mutates lgr.handlers, and
            # mutating the list while iterating it would skip handlers.
            for hdl in list(lgr.handlers):
                if isinstance(hdl, logging.FileHandler):
                    lgr.removeHandler(hdl)
class Logger(logging.Logger):
    """
    logging.Logger subclass that prefixes every message with indentation
    proportional to the caller's call-stack depth, plus explicit and
    automatic function entry/exit helpers.
    """
    def __init__(self, name, level=logging.NOTSET):
        """Initialize the subclassed Logger."""
        super().__init__(name, level)

    def _indent(self, drop):
        """
        Return the indentation prefix for the caller's call-stack depth.

        drop -- number of frames to discount (this helper's own frame, the
        public logging method, and any extra wrapper frames), so the prefix
        reflects the *user* code's depth.  Clamped at zero, matching the
        original behaviour where a negative count produced an empty string.
        """
        return " " * max(len(inspect.stack()) - drop, 0)

    def debug(self, msg, *args, **kwargs):
        """Indent DEBUG message according to call stack level before logging."""
        # drop=2: this frame plus the helper frame -> the depth the caller sees.
        super().debug(self._indent(2) + msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """Indent INFO message according to call stack level before logging."""
        super().info(self._indent(2) + msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """Indent WARNING message according to call stack level before logging."""
        super().warning(self._indent(2) + msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """Indent ERROR message according to call stack level before logging."""
        super().error(self._indent(2) + msg, *args, **kwargs)

    def logEntry(self, msg, *args, **kwargs):
        """Log function entry with DEBUG severity."""
        # drop=3: one level shallower than debug(), matching the original
        # "len(stack) - 2" indentation for explicit entry/exit markers.
        super().debug(self._indent(3) + ">>> Entry " + msg, *args, **kwargs)

    def autoLogEntry(self, msg, *args, **kwargs):
        """Log function entry with DEBUG severity for automatic logging."""
        # drop=4: additionally discounts the trace-callback frame.
        super().debug(self._indent(4) + ">>> Entry " + msg, *args, **kwargs)

    def logExit(self, msg, *args, **kwargs):
        """Log function exit with DEBUG severity."""
        super().debug(self._indent(3) + "<<< Exit " + msg, *args, **kwargs)

    def autoLogExit(self, msg, *args, **kwargs):
        """Log function exit with DEBUG severity for automatic logging."""
        super().debug(self._indent(4) + "<<< Exit " + msg, *args, **kwargs)
class RootLogger(Logger):
    """
    The root of the extended logger hierarchy; an extended Logger
    pinned to the name "root".
    """
    def __init__(self, level):
        """Initialize the root logger at the given *level*."""
        super().__init__("root", level)
# Module-level wiring, mirroring what the stdlib logging module does:
# the default logger class, the shared root logger instance, and a
# Manager that hands out our Logger subclass.
_loggerClass = Logger
root = RootLogger(logging.WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
def getLogger(name=None):
    """
    Return an extended logger with the specified name, creating it if
    necessary.  With no name (or any falsy one, or the literal root
    name) the root logger is returned.
    """
    wants_root = (not name) or (isinstance(name, str) and name == root.name)
    return root if wants_root else Logger.manager.getLogger(name)
def excludeFromLogging(frame):
    """
    Check whether *frame* should be excluded from automatic logging.

    A frame is excluded when it, or any frame further up its call chain,
    belongs to the logging infrastructure itself (the ``inspect`` or
    ``logging`` modules, or this module).  The original implementation
    recursed once per outer frame; this walks the ``f_back`` chain
    iteratively, avoiding recursion overhead and any risk of hitting the
    recursion limit on deep stacks inside the tracing hot path.
    """
    infra = ("inspect", "logging", __name__)
    while frame:
        module = inspect.getmodule(frame)
        if not module:
            # Frame without a resolvable module (e.g. exec'd code): keep logging.
            return False
        if module.__name__ in infra:
            return True
        frame = frame.f_back
    return False
def autoLogIgnore(frame, event, arg):
 """Trace hook that suppresses logging: keeps returning itself on 'call'
 events so a whole call subtree stays silent."""
 return autoLogIgnore if event == 'call' else None
def autoLogEntryExit(frame, event, arg):
 # Global trace hook (installed via sys.settrace) that auto-logs function
 # entry and exit.  Returning autoLogIgnore prunes an entire call subtree;
 # returning this function keeps receiving events for the current frame.
 if (event == 'call') or (event == 'return'):
  if not frame:
   return autoLogIgnore
  code_obj = frame.f_code
  func_name = code_obj.co_name
  file_name = code_obj.co_filename
  file_line = code_obj.co_firstlineno
  module = inspect.getmodule(frame)
  if not module:
   # Frames with no importable module (e.g. exec'd code) are not logged.
   return autoLogIgnore
  moduleName = module.__name__
  if event == 'call':
   # noInfrastructureLogging is a module-level flag defined elsewhere in
   # this file — presumably suppresses tracing of logging/inspect
   # internals; TODO confirm where it is set.
   if noInfrastructureLogging:
    if excludeFromLogging(frame):
     return autoLogIgnore
   getLogger(moduleName).autoLogEntry('%s (%s - line %s - module %s)', func_name, file_name, file_line, moduleName)
   return autoLogEntryExit
  elif event == 'return':
   # Implicitly returns None here: local tracing for the frame ends.
   getLogger(moduleName).autoLogExit ('%s : Return value: %s', func_name, arg)
def removeFileHandlers():
 """Ask the logging Manager to clean up its loggers (presumably closing
 any file handlers) — registered to run at interpreter exit."""
 getLogger().manager.cleanupLoggers()
def registerAutoLogEntryExit():
 # Install the global trace hook so every call/return is auto-logged.
 sys.settrace(autoLogEntryExit)
def unregisterAutoLogEntryExit():
 # Remove the trace hook installed by registerAutoLogEntryExit().
 sys.settrace(None)
# At interpreter exit, stop tracing and flush/close log handlers.  atexit
# runs callbacks in reverse registration order, so tracing is disabled
# first, then the handlers are cleaned up.
atexit.register(removeFileHandlers)
atexit.register(unregisterAutoLogEntryExit)
f732fd69f8f5bfd7c3e567e627a4c59e84d2aee0 | 3,260 | py | Python | gym-rbdl/gym_rbdl/envs/real_pendulum.py | michaelyeah7/magics_mbrl | 7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08 | [
"MIT"
] | 2 | 2021-10-02T21:58:02.000Z | 2022-03-23T15:34:46.000Z | gym-rbdl/gym_rbdl/envs/real_pendulum.py | michaelyeah7/roblax | 7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08 | [
"MIT"
] | null | null | null | gym-rbdl/gym_rbdl/envs/real_pendulum.py | michaelyeah7/roblax | 7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08 | [
"MIT"
] | null | null | null | import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
class PendulumEnv(gym.Env):
    """Inverted-pendulum Gym environment (variant of classic Pendulum-v0).

    State is ``(theta, theta_dot)``; observations are
    ``(cos(theta), sin(theta), theta_dot)``.  The reward uses a custom
    cost ``cos(th) + 0.1*|sin(th)| + 0.1*thdot^2 + 0.001*u^2`` which
    differs from the standard Gym angle-squared cost — it is minimized
    near ``cos(th) = -1``; confirm the intended angle convention.
    """

    metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}

    def __init__(self, g=10.0):
        # Physical constants: torque/speed limits, timestep, gravity,
        # mass and length of the rod.
        self.max_speed = 8
        self.max_torque = 2.0
        self.dt = 0.05
        self.g = g
        self.m = 1.0
        self.l = 1.0
        self.viewer = None

        high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
        self.action_space = spaces.Box(
            low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
        )
        self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)

        self.seed()

    def seed(self, seed=None):
        # Seed the environment's private RNG; returns the seed list per
        # the Gym API convention.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, u):
        th, thdot = self.state  # th := theta

        g = self.g
        m = self.m
        l = self.l
        dt = self.dt

        u = np.clip(u, -self.max_torque, self.max_torque)[0]
        self.last_u = u  # for rendering
        # Standard Gym cost (kept for reference; replaced by the custom
        # cost below):
        # costs = angle_normalize(th) ** 2 + 0.1 * thdot ** 2 + 0.001 * (u ** 2)
        y = np.cos(th)
        x = np.sin(th)
        costs = y + .1 * np.abs(x) + 0.1 * thdot ** 2 + 0.001 * (u ** 2)

        # Semi-implicit Euler integration of the pendulum dynamics.
        newthdot = (
            thdot
            + (-3 * g / (2 * l) * np.sin(th + np.pi) + 3.0 / (m * l ** 2) * u) * dt
        )
        newth = th + newthdot * dt
        newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)

        self.state = np.array([newth, newthdot])
        # Gym API: (observation, reward, done, info); episode never ends.
        return self._get_obs(), -costs, False, {}

    def reset(self):
        # Uniformly sample theta in [-pi, pi] and theta_dot in [-1, 1].
        high = np.array([np.pi, 1])
        self.state = self.np_random.uniform(low=-high, high=high)
        self.last_u = None
        return self._get_obs()

    def _get_obs(self):
        theta, thetadot = self.state
        return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)

    def render(self, mode="human"):
        # Lazily build the viewer (rod + axle + torque-direction arrow).
        if self.viewer is None:
            from gym.envs.classic_control import rendering

            self.viewer = rendering.Viewer(500, 500)
            self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
            rod = rendering.make_capsule(1, 0.2)
            rod.set_color(0.8, 0.3, 0.3)
            self.pole_transform = rendering.Transform()
            rod.add_attr(self.pole_transform)
            self.viewer.add_geom(rod)
            axle = rendering.make_circle(0.05)
            axle.set_color(0, 0, 0)
            self.viewer.add_geom(axle)
            fname = path.join(path.dirname(__file__), "assets/clockwise.png")
            self.img = rendering.Image(fname, 1.0, 1.0)
            self.imgtrans = rendering.Transform()
            self.img.add_attr(self.imgtrans)
            self.viewer.add_onetime(self.img)

        self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
        # Scale the arrow image by the last applied torque, if any.
        if self.last_u is not None:
            self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)

        return self.viewer.render(return_rgb_array=mode == "rgb_array")

    def close(self):
        if self.viewer:
            self.viewer.close()
            self.viewer = None
def angle_normalize(x):
    """Wrap an angle in radians into the interval [-pi, pi)."""
    two_pi = 2 * np.pi
    return (x + np.pi) % two_pi - np.pi
| 32.277228 | 86 | 0.557975 | import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
class PendulumEnv(gym.Env):
    """Inverted-pendulum Gym environment (comment-stripped duplicate of the
    variant above; custom cost differs from the standard Gym pendulum)."""

    metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}

    def __init__(self, g=10.0):
        # Torque/speed limits, timestep, gravity, rod mass and length.
        self.max_speed = 8
        self.max_torque = 2.0
        self.dt = 0.05
        self.g = g
        self.m = 1.0
        self.l = 1.0
        self.viewer = None

        high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
        self.action_space = spaces.Box(
            low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
        )
        self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)

        self.seed()

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, u):
        # State is (theta, theta_dot).
        th, thdot = self.state

        g = self.g
        m = self.m
        l = self.l
        dt = self.dt

        u = np.clip(u, -self.max_torque, self.max_torque)[0]
        # Remember the applied torque for render().
        self.last_u = u
        # Custom cost: cos(th) + 0.1|sin(th)| + velocity and control
        # penalties; reward is its negation.
        y = np.cos(th)
        x = np.sin(th)
        costs = y + .1 * np.abs(x) + 0.1 * thdot ** 2 + 0.001 * (u ** 2)

        # Semi-implicit Euler step of the pendulum dynamics.
        newthdot = (
            thdot
            + (-3 * g / (2 * l) * np.sin(th + np.pi) + 3.0 / (m * l ** 2) * u) * dt
        )
        newth = th + newthdot * dt
        newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)

        self.state = np.array([newth, newthdot])
        return self._get_obs(), -costs, False, {}

    def reset(self):
        # theta ~ U[-pi, pi], theta_dot ~ U[-1, 1].
        high = np.array([np.pi, 1])
        self.state = self.np_random.uniform(low=-high, high=high)
        self.last_u = None
        return self._get_obs()

    def _get_obs(self):
        theta, thetadot = self.state
        return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)

    def render(self, mode="human"):
        # Lazily construct the viewer on first call.
        if self.viewer is None:
            from gym.envs.classic_control import rendering

            self.viewer = rendering.Viewer(500, 500)
            self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
            rod = rendering.make_capsule(1, 0.2)
            rod.set_color(0.8, 0.3, 0.3)
            self.pole_transform = rendering.Transform()
            rod.add_attr(self.pole_transform)
            self.viewer.add_geom(rod)
            axle = rendering.make_circle(0.05)
            axle.set_color(0, 0, 0)
            self.viewer.add_geom(axle)
            fname = path.join(path.dirname(__file__), "assets/clockwise.png")
            self.img = rendering.Image(fname, 1.0, 1.0)
            self.imgtrans = rendering.Transform()
            self.img.add_attr(self.imgtrans)
            self.viewer.add_onetime(self.img)

        self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
        if self.last_u is not None:
            self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)

        return self.viewer.render(return_rgb_array=mode == "rgb_array")

    def close(self):
        if self.viewer:
            self.viewer.close()
            self.viewer = None
def angle_normalize(x):
    """Map an angle in radians to the equivalent value in [-pi, pi)."""
    shifted = x + np.pi
    return shifted % (2 * np.pi) - np.pi
| true | true |
f732fdf1128b31b7b49d386c93aa86199f8cc84f | 109 | py | Python | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | examples/etcc.py | t-pimpisa/pythainlp17 | cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from pythainlp.tokenize import etcc
# Segment a Thai phrase into Enhanced Thai Character Clusters (ETCC).
print(etcc.etcc("คืนความสุข")) # /คืน/ความสุข
| 18.166667 | 46 | 0.642202 |
from pythainlp.tokenize import etcc
# ETCC segmentation demo: prints the clustered segments of a Thai phrase.
print(etcc.etcc("คืนความสุข"))
| true | true |
f732fedcfa95d0c98923cfbada7fd3b630707d7f | 1,515 | py | Python | gridpot-feeder/GL_TRANSFORMER.py | sk4ld/vagrant-skyline-puppet | 3cfd6083e26f3de0abeea4c6e68f12b3151eb350 | [
"Apache-2.0"
] | 1 | 2015-11-18T19:58:28.000Z | 2015-11-18T19:58:28.000Z | gridpot-feeder/GL_TRANSFORMER.py | sk4ld/vagrant-skyline-puppet | 3cfd6083e26f3de0abeea4c6e68f12b3151eb350 | [
"Apache-2.0"
] | null | null | null | gridpot-feeder/GL_TRANSFORMER.py | sk4ld/vagrant-skyline-puppet | 3cfd6083e26f3de0abeea4c6e68f12b3151eb350 | [
"Apache-2.0"
] | 1 | 2021-08-06T22:17:32.000Z | 2021-08-06T22:17:32.000Z | # GridPot code
# switch object class for integrating with a GridLAB-D simulation instance
# Author: sk4ld
import logging
import urllib2
logger = logging.getLogger(__name__)
from GL_obj import GL_obj
# base object class for integrating with a GridLAB-D simulation instance
class GL_TRANSFORMER(GL_obj):
def init_params(self):
# Here we define what we want to poll for this object.
# We dont necessarily want to have a setter for each one of these
# Nor do we necessarily have to display each of these to the HMI
self.params["status"] = ""
self.params["phases"] = ""
self.params["from"] = ""
self.params["to"] = ""
self.params["ambient_temperature"] = ""
self.params["winding_hot_spot_temperature"] = "" # Not sure exactly what this is
self.params["configuration "] = "" # This one is a config file, is complicated to update/set
# OVERLOADED http display for the conpot built in http hmi
def http_display(self):
ht_format = "<table border=0>\n"
ht_format += "<tr>\n"
ht_format += " <td>"+ self.obj_name +"</td>\n"
ht_format += " <td></td>\n"
ht_format += "</tr>\n"
for x in ('status', 'phases', 'from', 'to', 'ambient_temperature'):
ht_format += "<tr>\n"
ht_format += " <td>" + x + "</td>\n"
ht_format += " <td>" + self.params[x] + "</td>\n"
ht_format += "<tr>\n"
return ht_format
| 37.875 | 101 | 0.592079 |
import logging
import urllib2
logger = logging.getLogger(__name__)
from GL_obj import GL_obj
class GL_TRANSFORMER(GL_obj):
def init_params(self):
self.params["status"] = ""
self.params["phases"] = ""
self.params["from"] = ""
self.params["to"] = ""
self.params["ambient_temperature"] = ""
self.params["winding_hot_spot_temperature"] = ""
self.params["configuration "] = ""
def http_display(self):
ht_format = "<table border=0>\n"
ht_format += "<tr>\n"
ht_format += " <td>"+ self.obj_name +"</td>\n"
ht_format += " <td></td>\n"
ht_format += "</tr>\n"
for x in ('status', 'phases', 'from', 'to', 'ambient_temperature'):
ht_format += "<tr>\n"
ht_format += " <td>" + x + "</td>\n"
ht_format += " <td>" + self.params[x] + "</td>\n"
ht_format += "<tr>\n"
return ht_format
| true | true |
f732ff07b4b9ceaad3c0ef1c9713573cdba5964c | 65 | py | Python | tests/resource/valid/class/import_check.py | JSAbrahams/mamba | 66ae435a4abf496aae945a78e4fdfa8e4785d854 | [
"MIT"
] | 82 | 2019-03-19T14:29:59.000Z | 2022-01-07T15:08:11.000Z | tests/resource/valid/class/import_check.py | JSAbrahams/mamba | 66ae435a4abf496aae945a78e4fdfa8e4785d854 | [
"MIT"
] | 171 | 2019-03-16T14:21:28.000Z | 2022-01-11T15:27:39.000Z | tests/resource/valid/class/import_check.py | JSAbrahams/mamba | 66ae435a4abf496aae945a78e4fdfa8e4785d854 | [
"MIT"
] | 3 | 2019-03-20T14:06:49.000Z | 2020-05-18T09:24:02.000Z | from a import b,c as c,d
from b import c
import d
import d as dd
| 13 | 24 | 0.723077 | from a import b,c as c,d
from b import c
import d
import d as dd
| true | true |
f732ff2b0f8c36e28675b28dbadae1a7182a00bc | 42,238 | py | Python | lib/sqlalchemy/sql/ddl.py | drecover/sqlalchemy | 6206f0ff74e95c9339dc0f0e26caab55e9bcda45 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/ddl.py | drecover/sqlalchemy | 6206f0ff74e95c9339dc0f0e26caab55e9bcda45 | [
"MIT"
] | 1 | 2020-08-07T16:50:16.000Z | 2020-08-07T16:50:16.000Z | lib/sqlalchemy/sql/ddl.py | drecover/sqlalchemy | 6206f0ff74e95c9339dc0f0e26caab55e9bcda45 | [
"MIT"
] | null | null | null | # sql/ddl.py
# Copyright (C) 2009-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from . import roles
from .base import _generative
from .base import Executable
from .base import SchemaVisitor
from .elements import ClauseElement
from .. import exc
from .. import util
from ..util import topological
class _DDLCompiles(ClauseElement):
    """Mixin routing compilation of a clause to the dialect's DDL compiler."""

    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""

        return dialect.ddl_compiler(dialect, self, **kw)

    def _compile_w_cache(self, *arg, **kw):
        # DDL constructs do not participate in the compiled-SQL cache.
        raise NotImplementedError()
class DDLElement(roles.DDLRole, Executable, _DDLCompiles):
    """Base class for DDL expression constructs.

    Common base for the general purpose :class:`.DDL` class as well as the
    create/drop constructs such as :class:`.CreateTable`,
    :class:`.DropTable`, :class:`.AddConstraint`, etc.

    :class:`.DDLElement` integrates closely with SQLAlchemy events
    (:ref:`event_toplevel`); an instance is itself an event-receiving
    callable::

        event.listen(
            users,
            'after_create',
            AddConstraint(constraint).execute_if(dialect='postgresql')
        )

    .. seealso::

        :class:`.DDL`

        :class:`.DDLEvents`

        :ref:`event_toplevel`

        :ref:`schema_ddl_sequences`

    """

    # Default event-filtering state, overridden per instance by
    # against() / execute_if().
    target = None
    on = None
    dialect = None
    callable_ = None

    def _execute_on_connection(
        self, connection, distilled_params, execution_options
    ):
        # Route execution through the Connection's DDL path.
        return connection._execute_ddl(
            self, distilled_params, execution_options
        )

    @_generative
    def against(self, target):
        """Return a copy of this :class:`_schema.DDLElement` which will include
        the given target.

        The returned copy carries the given :class:`_schema.SchemaItem` in
        its ``.target`` attribute, where event handlers and compilation
        routines can make use of it — e.g. tokenization of a DDL string in
        terms of a particular :class:`_schema.Table`.  Event dispatch for
        :meth:`_events.DDLEvents.before_create` /
        :meth:`_events.DDLEvents.after_create` establishes the event's
        target this way before executing the DDL.

        :param target: a :class:`_schema.SchemaItem` that will be the
         subject of a DDL operation.

        :return: a copy of this :class:`_schema.DDLElement` with ``.target``
         assigned.

        .. seealso::

            :class:`_schema.DDL` - uses tokenization against the "target" when
            processing the DDL string.

        """
        self.target = target

    @_generative
    def execute_if(self, dialect=None, callable_=None, state=None):
        """Return a callable that will execute this
        :class:`_ddl.DDLElement` conditionally within an event handler.

        Used to provide a wrapper for event listening::

            event.listen(
                metadata,
                'before_create',
                DDL("my_ddl").execute_if(dialect='postgresql')
            )

        :param dialect: a dialect name, or tuple of names, which the
         executing database's dialect must match for the DDL to run.

        :param callable_: a predicate invoked with ``(ddl, target, bind,
         state=..., **kw)`` — where ``kw`` may include ``tables`` and
         ``checkfirst`` during metadata create/drop operations; the DDL
         executes only if it returns a true value.

        :param state: any value, passed through to ``callable_`` as the
         ``state`` keyword argument.

        .. seealso::

            :class:`.DDLEvents`

            :ref:`event_toplevel`

        """
        self.dialect = dialect
        self.callable_ = callable_
        self.state = state

    def _should_execute(self, target, bind, **kw):
        # Filter by dialect name: either a single string or a collection.
        dialect_filter = self.dialect
        if isinstance(dialect_filter, util.string_types):
            if bind.engine.name != dialect_filter:
                return False
        elif isinstance(dialect_filter, (tuple, list, set)):
            if bind.engine.name not in dialect_filter:
                return False

        # Then apply the user-supplied predicate, if present.
        predicate = self.callable_
        if predicate is not None and not predicate(
            self, target, bind, state=self.state, **kw
        ):
            return False

        return True

    def __call__(self, target, bind, **kw):
        """Execute the DDL as a ddl_listener."""

        if self._should_execute(target, bind, **kw):
            return bind.execute(self.against(target))

    def bind(self):
        if self._bind:
            return self._bind

    def _set_bind(self, bind):
        self._bind = bind

    bind = property(bind, _set_bind)

    def _generate(self):
        # Shallow copy used by the @_generative decorator.
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__ = self.__dict__.copy()
        return duplicate
class DDL(DDLElement):
    """A literal DDL statement.

    Specifies literal SQL DDL to be executed by the database.  DDL objects
    function as DDL event listeners and can be subscribed to the events in
    :class:`.DDLEvents`, using :class:`_schema.Table` or
    :class:`_schema.MetaData` objects as targets.  Basic templating allows
    one DDL instance to handle repetitive tasks for multiple tables::

        from sqlalchemy import event, DDL

        tbl = Table('users', metadata, Column('uid', Integer))
        event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))

        spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
        event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))

    When operating on Table events, these ``statement`` substitutions are
    available (with any required quoting applied)::

      %(table)s  - the Table name
      %(schema)s - the schema name
      %(fullname)s - the Table name including schema

    The DDL's "context", if any, is combined with (and overrides) the
    standard substitutions above.

    """

    __visit_name__ = "ddl"

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.DDL.bind` argument is deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(self, statement, context=None, bind=None):
        """Create a DDL statement.

        :param statement: a string to be executed; processed with Python's
          string formatting operator, so a literal '%' must be escaped as
          '%%'.  SQL bind parameters are not available in DDL statements.

        :param context: optional dict of values made available for string
          substitution on the statement.

        :param bind: optional :class:`.Connectable`, used by default when
          ``execute()`` is invoked without a bind argument.

        .. seealso::

            :class:`.DDLEvents`

            :ref:`event_toplevel`

        """
        if not isinstance(statement, util.string_types):
            raise exc.ArgumentError(
                "Expected a string or unicode SQL statement, got '%r'"
                % statement
            )

        self.statement = statement
        self.context = context or {}

        self._bind = bind

    def __repr__(self):
        # "<DDL@id; 'stmt', context={...}>", listing only truthy attrs.
        parts = [repr(self.statement)]
        parts.extend(
            "%s=%r" % (key, getattr(self, key))
            for key in ("on", "context")
            if getattr(self, key)
        )
        return "<%s@%s; %s>" % (type(self).__name__, id(self), ", ".join(parts))
class _CreateDropBase(DDLElement):
    """Base class for DDL constructs that represent CREATE and DROP or
    equivalents.

    Subclasses share a single ``element`` attribute which refers to the
    schema item to be created or dropped.

    """

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.DDLElement.bind` argument is "
            "deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(
        self,
        element,
        bind=None,
        if_exists=False,
        if_not_exists=False,
        _legacy_bind=None,
    ):
        self.element = element
        # Prefer an explicit bind; fall back to the legacy keyword.
        # Assigning self.bind goes through the DDLElement property setter.
        effective_bind = bind or _legacy_bind
        if effective_bind:
            self.bind = effective_bind
        self.if_exists = if_exists
        self.if_not_exists = if_not_exists

    @property
    def stringify_dialect(self):
        # Delegate to the schema element being created/dropped.
        return self.element.create_drop_stringify_dialect

    def _create_rule_disable(self, compiler):
        """Callable form of "do not render this element inline".

        Installed as ``_create_rule`` via
        ``util.portable_instancemethod(self._create_rule_disable)`` so the
        rule remains serializable.

        """
        return False
class CreateSchema(_CreateDropBase):
    """Represent a CREATE SCHEMA statement.

    The argument here is the string name of the schema.

    """

    __visit_name__ = "create_schema"

    def __init__(self, name, quote=None, **kw):
        """Create a new :class:`.CreateSchema` construct."""

        super(CreateSchema, self).__init__(name, **kw)
        # Optional explicit quoting preference for the schema name.
        self.quote = quote
class DropSchema(_CreateDropBase):
    """Represent a DROP SCHEMA statement.

    The argument here is the string name of the schema.

    """

    __visit_name__ = "drop_schema"

    def __init__(self, name, quote=None, cascade=False, **kw):
        """Create a new :class:`.DropSchema` construct."""

        super(DropSchema, self).__init__(name, **kw)
        # Optional quoting preference and CASCADE flag.
        self.quote = quote
        self.cascade = cascade
class CreateTable(_CreateDropBase):
    """Represent a CREATE TABLE statement."""

    __visit_name__ = "create_table"

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.CreateTable.bind` argument is deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(
        self,
        element,
        bind=None,
        include_foreign_key_constraints=None,
        if_not_exists=False,
    ):
        """Create a :class:`.CreateTable` construct.

        :param element: the :class:`_schema.Table` that's the subject of
         the CREATE.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        :param include_foreign_key_constraints: optional sequence of
         :class:`_schema.ForeignKeyConstraint` objects to render inline
         within the CREATE; if omitted, all foreign key constraints that
         do not specify use_alter=True are included.

         .. versionadded:: 1.0.0

        :param if_not_exists: if True, an IF NOT EXISTS operator will be
         applied to the construct.

         .. versionadded:: 1.4.0b2

        """
        super(CreateTable, self).__init__(
            element, _legacy_bind=bind, if_not_exists=if_not_exists
        )
        # One CreateColumn wrapper per column, in table order.
        self.columns = [CreateColumn(col) for col in element.columns]
        self.include_foreign_key_constraints = include_foreign_key_constraints
# Leading underscore: internal API, not exported publicly.
class _DropView(_CreateDropBase):
    """Semi-public 'DROP VIEW' construct.

    Used by the test suite for dialect-agnostic drops of views.
    This object will eventually be part of a public "view" API.

    """

    __visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
    """Represent a :class:`_schema.Column` as rendered in a CREATE TABLE
    statement, via the :class:`.CreateTable` construct.

    Provided to support custom column DDL during generation of CREATE
    TABLE statements, using the compiler extension documented in
    :ref:`sqlalchemy.ext.compiler_toplevel`.  A typical integration
    examines the incoming :class:`_schema.Column` and redirects
    compilation when a particular flag or condition is found::

        from sqlalchemy import schema
        from sqlalchemy.ext.compiler import compiles

        @compiles(schema.CreateColumn)
        def compile(element, compiler, **kw):
            column = element.element
            if "special" not in column.info:
                return compiler.visit_create_column(element, **kw)

            text = "%s SPECIAL DIRECTIVE %s" % (
                column.name,
                compiler.type_compiler.process(column.type)
            )
            default = compiler.get_column_default_string(column)
            if default is not None:
                text += " DEFAULT " + default
            if not column.nullable:
                text += " NOT NULL"
            if column.constraints:
                text += " ".join(
                    compiler.process(const)
                    for const in column.constraints)
            return text

    The construct can also be used to skip certain columns when producing
    a ``CREATE TABLE``, by creating a compilation rule that conditionally
    returns ``None`` — essentially the same effect as the ``system=True``
    argument on :class:`_schema.Column`, which marks a column as an
    implicitly-present "system" column.  For example, to omit PostgreSQL's
    ``xmin`` column only on the PostgreSQL backend::

        from sqlalchemy.schema import CreateColumn

        @compiles(CreateColumn, "postgresql")
        def skip_xmin(element, compiler, **kw):
            if element.element.name == 'xmin':
                return None
            else:
                return compiler.visit_create_column(element, **kw)


        my_table = Table('mytable', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('xmin', Integer)
                )

    Above, :class:`.CreateTable` will include only the ``id`` column in
    the string; ``xmin`` is omitted, but only against PostgreSQL.

    """

    __visit_name__ = "create_column"

    def __init__(self, element):
        self.element = element
class DropTable(_CreateDropBase):
    """Represent a DROP TABLE statement."""

    __visit_name__ = "drop_table"

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.DropTable.bind` argument is "
            "deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(self, element, bind=None, if_exists=False):
        """Create a :class:`.DropTable` construct.

        :param element: the :class:`_schema.Table` that's the subject of
         the DROP.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        :param if_exists: if True, an IF EXISTS operator will be applied
         to the construct.

         .. versionadded:: 1.4.0b2

        """
        super(DropTable, self).__init__(
            element, _legacy_bind=bind, if_exists=if_exists
        )
class CreateSequence(_CreateDropBase):
    """Represent a CREATE SEQUENCE statement."""

    # Dispatch key for the dialect's DDL compiler.
    __visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
    """Represent a DROP SEQUENCE statement."""

    # Dispatch key for the dialect's DDL compiler.
    __visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
    """Represent a CREATE INDEX statement."""

    __visit_name__ = "create_index"

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.CreateIndex.bind` argument is "
            "deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(self, element, bind=None, if_not_exists=False):
        """Create a :class:`.CreateIndex` construct.

        :param element: a :class:`_schema.Index` that's the subject
         of the CREATE.
        :param on: See the description for 'on' in :class:`.DDL`.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        :param if_not_exists: if True, an IF NOT EXISTS operator will be
         applied to the construct.

         .. versionadded:: 1.4.0b2

        """
        super(CreateIndex, self).__init__(
            element, _legacy_bind=bind, if_not_exists=if_not_exists
        )
class DropIndex(_CreateDropBase):
    """Represent a DROP INDEX statement."""

    __visit_name__ = "drop_index"

    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_ddl.DropIndex.bind` argument is "
            "deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(self, element, bind=None, if_exists=False):
        """Create a :class:`.DropIndex` construct.

        :param element: the :class:`_schema.Index` that's the subject of
         the DROP.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        :param if_exists: if True, an IF EXISTS operator will be applied
         to the construct.

         .. versionadded:: 1.4.0b2

        """
        super(DropIndex, self).__init__(
            element, _legacy_bind=bind, if_exists=if_exists
        )
class AddConstraint(_CreateDropBase):
    """Represent an ALTER TABLE ADD CONSTRAINT statement."""

    __visit_name__ = "add_constraint"

    def __init__(self, element, *args, **kw):
        super(AddConstraint, self).__init__(element, *args, **kw)
        # Since the constraint will be emitted via ALTER, suppress its
        # inline rendering within CREATE TABLE; portable_instancemethod
        # keeps the rule serializable.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable
        )
class DropConstraint(_CreateDropBase):
    """Represent an ALTER TABLE DROP CONSTRAINT statement."""

    __visit_name__ = "drop_constraint"

    def __init__(self, element, cascade=False, **kw):
        # Optional CASCADE flag for the DROP CONSTRAINT.
        self.cascade = cascade
        super(DropConstraint, self).__init__(element, **kw)
        # The constraint is managed via ALTER; suppress inline rendering
        # within CREATE TABLE (see _CreateDropBase._create_rule_disable).
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable
        )
class SetTableComment(_CreateDropBase):
    """Represent a COMMENT ON TABLE IS statement."""

    # Emitted by SchemaGenerator.visit_table after CREATE TABLE, when the
    # dialect supports comments but not inline comments.
    __visit_name__ = "set_table_comment"
class DropTableComment(_CreateDropBase):
    """Represent a COMMENT ON TABLE '' statement.

    Note this varies a lot across database backends.

    """

    __visit_name__ = "drop_table_comment"
class SetColumnComment(_CreateDropBase):
    """Represent a COMMENT ON COLUMN IS statement."""

    # Emitted per-column by SchemaGenerator.visit_table when the dialect
    # supports comments but not inline comments.
    __visit_name__ = "set_column_comment"
class DropColumnComment(_CreateDropBase):
    """Represent a COMMENT ON COLUMN IS NULL statement."""

    __visit_name__ = "drop_column_comment"
class DDLBase(SchemaVisitor):
    """Base for DDL-emitting schema visitors; holds the target connection."""

    def __init__(self, connection):
        self.connection = connection
class SchemaGenerator(DDLBase):
    """Visitor that emits CREATE statements for metadata, tables, sequences
    and indexes over a single connection, honoring ``checkfirst`` and
    firing the before/after create events."""

    def __init__(
        self, dialect, connection, checkfirst=False, tables=None, **kwargs
    ):
        super(SchemaGenerator, self).__init__(connection, **kwargs)
        # checkfirst: skip objects the database reports as already present.
        self.checkfirst = checkfirst
        # tables: optional explicit subset; None means all metadata tables.
        self.tables = tables
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect
        # memo dict used by the traversal machinery.
        self.memo = {}

    def _can_create_table(self, table):
        # Validate identifiers up front; then (optionally) skip tables
        # that already exist.
        self.dialect.validate_identifier(table.name)
        effective_schema = self.connection.schema_for_object(table)
        if effective_schema:
            self.dialect.validate_identifier(effective_schema)
        return not self.checkfirst or not self.dialect.has_table(
            self.connection, table.name, schema=effective_schema
        )

    def _can_create_index(self, index):
        effective_schema = self.connection.schema_for_object(index.table)
        if effective_schema:
            self.dialect.validate_identifier(effective_schema)
        return not self.checkfirst or not self.dialect.has_index(
            self.connection,
            index.table.name,
            index.name,
            schema=effective_schema,
        )

    def _can_create_sequence(self, sequence):
        # A sequence is created only if the dialect supports sequences,
        # it is not an "optional" sequence on a dialect where sequences
        # are optional, and (under checkfirst) it does not already exist.
        effective_schema = self.connection.schema_for_object(sequence)

        return self.dialect.supports_sequences and (
            (not self.dialect.sequences_optional or not sequence.optional)
            and (
                not self.checkfirst
                or not self.dialect.has_sequence(
                    self.connection, sequence.name, schema=effective_schema
                )
            )
        )

    def visit_metadata(self, metadata):
        if self.tables is not None:
            tables = self.tables
        else:
            tables = list(metadata.tables.values())

        # Order tables by dependency; each entry is (table, fk_constraints)
        # where table may be None for constraints added via ALTER later.
        collection = sort_tables_and_constraints(
            [t for t in tables if self._can_create_table(t)]
        )

        # Standalone sequences only; column-attached sequences are created
        # with their table.
        seq_coll = [
            s
            for s in metadata._sequences.values()
            if s.column is None and self._can_create_sequence(s)
        ]

        event_collection = [t for (t, fks) in collection if t is not None]
        metadata.dispatch.before_create(
            metadata,
            self.connection,
            tables=event_collection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
        )

        for seq in seq_coll:
            self.traverse_single(seq, create_ok=True)

        for table, fkcs in collection:
            if table is not None:
                self.traverse_single(
                    table,
                    create_ok=True,
                    include_foreign_key_constraints=fkcs,
                    _is_metadata_operation=True,
                )
            else:
                # Cyclic dependencies: emit these FK constraints via ALTER
                # after their tables exist.
                for fkc in fkcs:
                    self.traverse_single(fkc)

        metadata.dispatch.after_create(
            metadata,
            self.connection,
            tables=event_collection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
        )

    def visit_table(
        self,
        table,
        create_ok=False,
        include_foreign_key_constraints=None,
        _is_metadata_operation=False,
    ):
        if not create_ok and not self._can_create_table(table):
            return

        table.dispatch.before_create(
            table,
            self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation,
        )

        # Column defaults (e.g. sequences) may require their own DDL first.
        for column in table.columns:
            if column.default is not None:
                self.traverse_single(column.default)

        if not self.dialect.supports_alter:
            # e.g., don't omit any foreign key constraints
            include_foreign_key_constraints = None

        self.connection.execute(
            # fmt: off
            CreateTable(
                table,
                include_foreign_key_constraints=  # noqa
                    include_foreign_key_constraints,  # noqa
            )
            # fmt: on
        )

        if hasattr(table, "indexes"):
            for index in table.indexes:
                self.traverse_single(index, create_ok=True)

        # Out-of-line COMMENT statements, for dialects that support
        # comments but do not render them inline in CREATE TABLE.
        if self.dialect.supports_comments and not self.dialect.inline_comments:
            if table.comment is not None:
                self.connection.execute(SetTableComment(table))

            for column in table.columns:
                if column.comment is not None:
                    self.connection.execute(SetColumnComment(column))

        table.dispatch.after_create(
            table,
            self.connection,
            checkfirst=self.checkfirst,
            _ddl_runner=self,
            _is_metadata_operation=_is_metadata_operation,
        )

    def visit_foreign_key_constraint(self, constraint):
        # Deferred FK constraints are added via ALTER; skip entirely on
        # dialects with no ALTER support.
        if not self.dialect.supports_alter:
            return
        self.connection.execute(AddConstraint(constraint))

    def visit_sequence(self, sequence, create_ok=False):
        if not create_ok and not self._can_create_sequence(sequence):
            return
        self.connection.execute(CreateSequence(sequence))

    def visit_index(self, index, create_ok=False):
        if not create_ok and not self._can_create_index(index):
            return
        self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = [t for t in tables if self._can_drop_table(t)]
collection = list(
reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None
else None,
)
)
)
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s; and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (", ".join(sorted([t.fullname for t in err2.cycles])))
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_(
exc.CircularDependencyError(
err2.args[0],
err2.cycles,
err2.edges,
msg="Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have "
"names so that they can be dropped using "
"DROP CONSTRAINT."
% (
", ".join(
sorted([t.fullname for t in err2.cycles])
)
),
),
from_=err2,
)
seq_coll = [
s
for s in metadata._sequences.values()
if self._can_drop_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table,
drop_ok=True,
_is_metadata_operation=True,
_ignore_sequences=seq_coll,
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=seq.column is None)
metadata.dispatch.after_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_drop_index(self, index):
effective_schema = self.connection.schema_for_object(index.table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_index(
self.connection,
index.table.name,
index.name,
schema=effective_schema,
)
def _can_drop_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_index(self, index, drop_ok=False):
if not drop_ok and not self._can_drop_index(index):
return
self.connection.execute(DropIndex(index))
def visit_table(
self,
table,
drop_ok=False,
_is_metadata_operation=False,
_ignore_sequences=(),
):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
self.connection.execute(DropTable(table))
# traverse client side defaults which may refer to server-side
# sequences. noting that some of these client side defaults may also be
# set up as server side defaults (see https://docs.sqlalchemy.org/en/
# latest/core/defaults.html#associating-a-sequence-as-the-server-side-
# default), so have to be dropped after the table is dropped.
for column in table.columns:
if (
column.default is not None
and column.default not in _ignore_sequences
):
self.traverse_single(column.default)
table.dispatch.after_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(DropConstraint(constraint))
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(
tables,
skip_fn=None,
extra_dependencies=None,
):
"""Sort a collection of :class:`_schema.Table` objects based on
dependency.
This is a dependency-ordered sort which will emit :class:`_schema.Table`
objects such that they will follow their dependent :class:`_schema.Table`
objects.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint`
objects as well as explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`.
.. warning::
The :func:`._schema.sort_tables` function cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
A warning is emitted when this condition occurs, which will be an
exception raise in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:func:`_schema.sort_tables` cannot perform a proper sort due to
cyclical dependencies. This will be an exception in a future
release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order
which was not the case previously.
:param tables: a sequence of :class:`_schema.Table` objects.
:param skip_fn: optional callable which will be passed a
:class:`_schema.ForeignKey` object; if it returns True, this
constraint will not be considered as a dependency. Note this is
**different** from the same parameter in
:func:`.sort_tables_and_constraints`, which is
instead passed the owning :class:`_schema.ForeignKeyConstraint` object.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. seealso::
:func:`.sort_tables_and_constraints`
:attr:`_schema.MetaData.sorted_tables` - uses this function to sort
"""
if skip_fn is not None:
def _skip_fn(fkc):
for fk in fkc.elements:
if skip_fn(fk):
return True
else:
return None
else:
_skip_fn = None
return [
t
for (t, fkcs) in sort_tables_and_constraints(
tables,
filter_fn=_skip_fn,
extra_dependencies=extra_dependencies,
_warn_for_cycles=True,
)
if t is not None
]
def sort_tables_and_constraints(
tables, filter_fn=None, extra_dependencies=None, _warn_for_cycles=False
):
"""Sort a collection of :class:`_schema.Table` /
:class:`_schema.ForeignKeyConstraint`
objects.
This is a dependency-ordered sort which will emit tuples of
``(Table, [ForeignKeyConstraint, ...])`` such that each
:class:`_schema.Table` follows its dependent :class:`_schema.Table`
objects.
Remaining :class:`_schema.ForeignKeyConstraint`
objects that are separate due to
dependency rules not satisfied by the sort are emitted afterwards
as ``(None, [ForeignKeyConstraint ...])``.
Tables are dependent on another based on the presence of
:class:`_schema.ForeignKeyConstraint` objects, explicit dependencies
added by :meth:`_schema.Table.add_is_dependent_on`,
as well as dependencies
stated here using the :paramref:`~.sort_tables_and_constraints.skip_fn`
and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
parameters.
:param tables: a sequence of :class:`_schema.Table` objects.
:param filter_fn: optional callable which will be passed a
:class:`_schema.ForeignKeyConstraint` object,
and returns a value based on
whether this constraint should definitely be included or excluded as
an inline constraint, or neither. If it returns False, the constraint
will definitely be included as a dependency that cannot be subject
to ALTER; if True, it will **only** be included as an ALTER result at
the end. Returning None means the constraint is included in the
table-based result unless it is detected as part of a dependency cycle.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. versionadded:: 1.0.0
.. seealso::
:func:`.sort_tables`
"""
fixed_dependencies = set()
mutable_dependencies = set()
if extra_dependencies is not None:
fixed_dependencies.update(extra_dependencies)
remaining_fkcs = set()
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
remaining_fkcs.add(fkc)
continue
if filter_fn:
filtered = filter_fn(fkc)
if filtered is True:
remaining_fkcs.add(fkc)
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.add((dependent_on, table))
fixed_dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
try:
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
)
)
except exc.CircularDependencyError as err:
if _warn_for_cycles:
util.warn(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "%s", which is usually caused by mutually '
"dependent foreign key constraints. Foreign key constraints "
"involving these tables will not be considered; this warning "
"may raise an error in a future release."
% (", ".join(sorted(t.fullname for t in err.cycles)),)
)
for edge in err.edges:
if edge in mutable_dependencies:
table = edge[1]
if table not in err.cycles:
continue
can_remove = [
fkc
for fkc in table.foreign_key_constraints
if filter_fn is None or filter_fn(fkc) is not False
]
remaining_fkcs.update(can_remove)
for fkc in can_remove:
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.discard((dependent_on, table))
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
)
)
return [
(table, table.foreign_key_constraints.difference(remaining_fkcs))
for table in candidate_sort
] + [(None, list(remaining_fkcs))]
| 32.69195 | 79 | 0.608244 |
from . import roles
from .base import _generative
from .base import Executable
from .base import SchemaVisitor
from .elements import ClauseElement
from .. import exc
from .. import util
from ..util import topological
class _DDLCompiles(ClauseElement):
def _compiler(self, dialect, **kw):
return dialect.ddl_compiler(dialect, self, **kw)
def _compile_w_cache(self, *arg, **kw):
raise NotImplementedError()
class DDLElement(roles.DDLRole, Executable, _DDLCompiles):
target = None
on = None
dialect = None
callable_ = None
def _execute_on_connection(
self, connection, distilled_params, execution_options
):
return connection._execute_ddl(
self, distilled_params, execution_options
)
@_generative
def against(self, target):
self.target = target
@_generative
def execute_if(self, dialect=None, callable_=None, state=None):
self.dialect = dialect
self.callable_ = callable_
self.state = state
def _should_execute(self, target, bind, **kw):
if isinstance(self.dialect, util.string_types):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if self.callable_ is not None and not self.callable_(
self, target, bind, state=self.state, **kw
):
return False
return True
def __call__(self, target, bind, **kw):
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class DDL(DDLElement):
__visit_name__ = "ddl"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.DDL.bind` argument is deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(self, statement, context=None, bind=None):
if not isinstance(statement, util.string_types):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'"
% statement
)
self.statement = statement
self.context = context or {}
self._bind = bind
def __repr__(self):
return "<%s@%s; %s>" % (
type(self).__name__,
id(self),
", ".join(
[repr(self.statement)]
+ [
"%s=%r" % (key, getattr(self, key))
for key in ("on", "context")
if getattr(self, key)
]
),
)
class _CreateDropBase(DDLElement):
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.DDLElement.bind` argument is "
"deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(
self,
element,
bind=None,
if_exists=False,
if_not_exists=False,
_legacy_bind=None,
):
self.element = element
if bind:
self.bind = bind
elif _legacy_bind:
self.bind = _legacy_bind
self.if_exists = if_exists
self.if_not_exists = if_not_exists
@property
def stringify_dialect(self):
return self.element.create_drop_stringify_dialect
def _create_rule_disable(self, compiler):
return False
class CreateSchema(_CreateDropBase):
__visit_name__ = "create_schema"
def __init__(self, name, quote=None, **kw):
self.quote = quote
super(CreateSchema, self).__init__(name, **kw)
class DropSchema(_CreateDropBase):
__visit_name__ = "drop_schema"
def __init__(self, name, quote=None, cascade=False, **kw):
self.quote = quote
self.cascade = cascade
super(DropSchema, self).__init__(name, **kw)
class CreateTable(_CreateDropBase):
__visit_name__ = "create_table"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.CreateTable.bind` argument is deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(
self,
element,
bind=None,
include_foreign_key_constraints=None,
if_not_exists=False,
):
super(CreateTable, self).__init__(
element, _legacy_bind=bind, if_not_exists=if_not_exists
)
self.columns = [CreateColumn(column) for column in element.columns]
self.include_foreign_key_constraints = include_foreign_key_constraints
class _DropView(_CreateDropBase):
__visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
__visit_name__ = "create_column"
def __init__(self, element):
self.element = element
class DropTable(_CreateDropBase):
__visit_name__ = "drop_table"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.DropTable.bind` argument is "
"deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(self, element, bind=None, if_exists=False):
super(DropTable, self).__init__(
element, _legacy_bind=bind, if_exists=if_exists
)
class CreateSequence(_CreateDropBase):
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
__visit_name__ = "create_index"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.CreateIndex.bind` argument is "
"deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(self, element, bind=None, if_not_exists=False):
super(CreateIndex, self).__init__(
element, _legacy_bind=bind, if_not_exists=if_not_exists
)
class DropIndex(_CreateDropBase):
__visit_name__ = "drop_index"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_ddl.DropIndex.bind` argument is "
"deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(self, element, bind=None, if_exists=False):
super(DropIndex, self).__init__(
element, _legacy_bind=bind, if_exists=if_exists
)
class AddConstraint(_CreateDropBase):
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class DropConstraint(_CreateDropBase):
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable
)
class SetTableComment(_CreateDropBase):
__visit_name__ = "set_table_comment"
class DropTableComment(_CreateDropBase):
__visit_name__ = "drop_table_comment"
class SetColumnComment(_CreateDropBase):
__visit_name__ = "set_column_comment"
class DropColumnComment(_CreateDropBase):
__visit_name__ = "drop_column_comment"
class DDLBase(SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or not self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_create_index(self, index):
effective_schema = self.connection.schema_for_object(index.table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or not self.dialect.has_index(
self.connection,
index.table.name,
index.name,
schema=effective_schema,
)
def _can_create_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or not self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = sort_tables_and_constraints(
[t for t in tables if self._can_create_table(t)]
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table,
create_ok=True,
include_foreign_key_constraints=fkcs,
_is_metadata_operation=True,
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
metadata.dispatch.after_create(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def visit_table(
self,
table,
create_ok=False,
include_foreign_key_constraints=None,
_is_metadata_operation=False,
):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
if not self.dialect.supports_alter:
include_foreign_key_constraints = None
self.connection.execute(
# fmt: off
CreateTable(
table,
include_foreign_key_constraints= # noqa
include_foreign_key_constraints, # noqa
)
# fmt: on
)
if hasattr(table, "indexes"):
for index in table.indexes:
self.traverse_single(index, create_ok=True)
if self.dialect.supports_comments and not self.dialect.inline_comments:
if table.comment is not None:
self.connection.execute(SetTableComment(table))
for column in table.columns:
if column.comment is not None:
self.connection.execute(SetColumnComment(column))
table.dispatch.after_create(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(AddConstraint(constraint))
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(CreateSequence(sequence))
def visit_index(self, index, create_ok=False):
if not create_ok and not self._can_create_index(index):
return
self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(
self, dialect, connection, checkfirst=False, tables=None, **kwargs
):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = [t for t in tables if self._can_drop_table(t)]
collection = list(
reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None
else None,
)
)
)
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s; and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (", ".join(sorted([t.fullname for t in err2.cycles])))
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_(
exc.CircularDependencyError(
err2.args[0],
err2.cycles,
err2.edges,
msg="Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have "
"names so that they can be dropped using "
"DROP CONSTRAINT."
% (
", ".join(
sorted([t.fullname for t in err2.cycles])
)
),
),
from_=err2,
)
seq_coll = [
s
for s in metadata._sequences.values()
if self._can_drop_sequence(s)
]
event_collection = [t for (t, fks) in collection if t is not None]
metadata.dispatch.before_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table,
drop_ok=True,
_is_metadata_operation=True,
_ignore_sequences=seq_coll,
)
else:
for fkc in fkcs:
self.traverse_single(fkc)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=seq.column is None)
metadata.dispatch.after_drop(
metadata,
self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self,
)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_table(
self.connection, table.name, schema=effective_schema
)
def _can_drop_index(self, index):
effective_schema = self.connection.schema_for_object(index.table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or self.dialect.has_index(
self.connection,
index.table.name,
index.name,
schema=effective_schema,
)
def _can_drop_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or not sequence.optional)
and (
not self.checkfirst
or self.dialect.has_sequence(
self.connection, sequence.name, schema=effective_schema
)
)
)
def visit_index(self, index, drop_ok=False):
if not drop_ok and not self._can_drop_index(index):
return
self.connection.execute(DropIndex(index))
def visit_table(
self,
table,
drop_ok=False,
_is_metadata_operation=False,
_ignore_sequences=(),
):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
self.connection.execute(DropTable(table))
# traverse client side defaults which may refer to server-side
# sequences. noting that some of these client side defaults may also be
# set up as server side defaults (see https://docs.sqlalchemy.org/en/
# latest/core/defaults.html#associating-a-sequence-as-the-server-side-
# default), so have to be dropped after the table is dropped.
for column in table.columns:
if (
column.default is not None
and column.default not in _ignore_sequences
):
self.traverse_single(column.default)
table.dispatch.after_drop(
table,
self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation,
)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(DropConstraint(constraint))
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(
tables,
skip_fn=None,
extra_dependencies=None,
):
if skip_fn is not None:
def _skip_fn(fkc):
for fk in fkc.elements:
if skip_fn(fk):
return True
else:
return None
else:
_skip_fn = None
return [
t
for (t, fkcs) in sort_tables_and_constraints(
tables,
filter_fn=_skip_fn,
extra_dependencies=extra_dependencies,
_warn_for_cycles=True,
)
if t is not None
]
def sort_tables_and_constraints(
tables, filter_fn=None, extra_dependencies=None, _warn_for_cycles=False
):
fixed_dependencies = set()
mutable_dependencies = set()
if extra_dependencies is not None:
fixed_dependencies.update(extra_dependencies)
remaining_fkcs = set()
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
remaining_fkcs.add(fkc)
continue
if filter_fn:
filtered = filter_fn(fkc)
if filtered is True:
remaining_fkcs.add(fkc)
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.add((dependent_on, table))
fixed_dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
try:
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
)
)
except exc.CircularDependencyError as err:
if _warn_for_cycles:
util.warn(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "%s", which is usually caused by mutually '
"dependent foreign key constraints. Foreign key constraints "
"involving these tables will not be considered; this warning "
"may raise an error in a future release."
% (", ".join(sorted(t.fullname for t in err.cycles)),)
)
for edge in err.edges:
if edge in mutable_dependencies:
table = edge[1]
if table not in err.cycles:
continue
can_remove = [
fkc
for fkc in table.foreign_key_constraints
if filter_fn is None or filter_fn(fkc) is not False
]
remaining_fkcs.update(can_remove)
for fkc in can_remove:
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.discard((dependent_on, table))
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies),
tables,
)
)
return [
(table, table.foreign_key_constraints.difference(remaining_fkcs))
for table in candidate_sort
] + [(None, list(remaining_fkcs))]
| true | true |
f732ff55a4a646485c2089f03e232039dc638275 | 1,579 | py | Python | qcodes/tests/dataset/test_subscribing.py | cgranade/Qcodes | 2d8fd0b8e0fa12d7921a96003318598ad347dd05 | [
"MIT"
] | 1 | 2019-10-12T04:54:30.000Z | 2019-10-12T04:54:30.000Z | qcodes/tests/dataset/test_subscribing.py | cgranade/Qcodes | 2d8fd0b8e0fa12d7921a96003318598ad347dd05 | [
"MIT"
] | null | null | null | qcodes/tests/dataset/test_subscribing.py | cgranade/Qcodes | 2d8fd0b8e0fa12d7921a96003318598ad347dd05 | [
"MIT"
] | null | null | null | # Test some subscription scenarios
from typing import List, Tuple, Dict, Union
from numbers import Number
import pytest
from numpy import ndarray
from qcodes.dataset.param_spec import ParamSpec
# pylint: disable=unused-import
from qcodes.tests.dataset.temporary_databases import (empty_temp_db,
experiment,
dataset)
VALUE = Union[str, Number, List, ndarray, bool]
@pytest.fixture(scope='function')
def basic_subscriber():
"""
A basic subscriber that just puts results and length into
state
"""
def subscriber(results: List[Tuple[VALUE]], length: int,
state: Dict) -> None:
state[length] = results
return subscriber
def test_basic_subscription(dataset, basic_subscriber):
xparam = ParamSpec(name='x', paramtype='numeric', label='x parameter',
unit='V')
yparam = ParamSpec(name='y', paramtype='numeric', label='y parameter',
unit='Hz', depends_on=[xparam])
dataset.add_parameter(xparam)
dataset.add_parameter(yparam)
sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1,
state={})
assert len(dataset.subscribers) == 1
assert list(dataset.subscribers.keys()) == [sub_id]
expected_state = {}
for x in range(10):
y = -x**2
dataset.add_result({'x': x, 'y': y})
expected_state[x+1] = [(x, y)]
assert dataset.subscribers[sub_id].state == expected_state
| 29.792453 | 74 | 0.609246 |
from typing import List, Tuple, Dict, Union
from numbers import Number
import pytest
from numpy import ndarray
from qcodes.dataset.param_spec import ParamSpec
from qcodes.tests.dataset.temporary_databases import (empty_temp_db,
experiment,
dataset)
VALUE = Union[str, Number, List, ndarray, bool]
@pytest.fixture(scope='function')
def basic_subscriber():
def subscriber(results: List[Tuple[VALUE]], length: int,
state: Dict) -> None:
state[length] = results
return subscriber
def test_basic_subscription(dataset, basic_subscriber):
xparam = ParamSpec(name='x', paramtype='numeric', label='x parameter',
unit='V')
yparam = ParamSpec(name='y', paramtype='numeric', label='y parameter',
unit='Hz', depends_on=[xparam])
dataset.add_parameter(xparam)
dataset.add_parameter(yparam)
sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1,
state={})
assert len(dataset.subscribers) == 1
assert list(dataset.subscribers.keys()) == [sub_id]
expected_state = {}
for x in range(10):
y = -x**2
dataset.add_result({'x': x, 'y': y})
expected_state[x+1] = [(x, y)]
assert dataset.subscribers[sub_id].state == expected_state
| true | true |
f732ffa63ee18b865a81cf1519498f341dbde992 | 13,600 | py | Python | utils.py | ZeroDesigner/quantum-gan | 76b12fe1be25ac2a5e75fdc472947a08d7065c50 | [
"MIT"
] | 49 | 2021-01-12T04:18:58.000Z | 2022-03-06T03:19:39.000Z | utils.py | jundeli/quantum-gan | 76b12fe1be25ac2a5e75fdc472947a08d7065c50 | [
"MIT"
] | null | null | null | utils.py | jundeli/quantum-gan | 76b12fe1be25ac2a5e75fdc472947a08d7065c50 | [
"MIT"
] | 17 | 2021-01-14T16:01:24.000Z | 2022-01-20T00:49:56.000Z | from sklearn.metrics import classification_report as sk_classification_report
from sklearn.metrics import confusion_matrix
import pickle
import gzip
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
import math
import numpy as np
NP_model = pickle.load(gzip.open('data/NP_score.pkl.gz'))
SA_model = {i[j]: float(i[0]) for i in pickle.load(gzip.open('data/SA_score.pkl.gz')) for j in range(1, len(i))}
class MolecularMetrics(object):
    """Static collection of molecular scoring metrics for evaluating
    generated molecules (validity, novelty, uniqueness, drug-likeness,
    synthetic accessibility, diversity).

    Inputs are RDKit ``Mol`` objects, with ``None`` entries standing for
    invalid/unparseable molecules.
    """
    @staticmethod
    def _avoid_sanitization_error(op):
        """Run ``op()``; return None if it raises ValueError (RDKit sanitization)."""
        try:
            return op()
        except ValueError:
            return None
    @staticmethod
    def remap(x, x_min, x_max):
        """Linearly rescale ``x`` from [x_min, x_max] to [0, 1] (not clipped)."""
        return (x - x_min) / (x_max - x_min)
    @staticmethod
    def valid_lambda(x):
        # Valid = molecule exists and serializes to a non-empty SMILES.
        return x is not None and Chem.MolToSmiles(x) != ''
    @staticmethod
    def valid_lambda_special(x):
        # Stricter validity: additionally reject wildcard atoms ('*') and
        # disconnected fragments ('.').
        s = Chem.MolToSmiles(x) if x is not None else ''
        return x is not None and '*' not in s and '.' not in s and s != ''
    @staticmethod
    def valid_scores(mols):
        """Float array of 0/1 strict-validity flags, one per molecule."""
        return np.array(list(map(MolecularMetrics.valid_lambda_special, mols)), dtype=np.float32)
    @staticmethod
    def valid_filter(mols):
        """Return only the valid molecules."""
        return list(filter(MolecularMetrics.valid_lambda, mols))
    @staticmethod
    def valid_total_score(mols):
        """Fraction of molecules that are valid."""
        return np.array(list(map(MolecularMetrics.valid_lambda, mols)), dtype=np.float32).mean()
    @staticmethod
    def novel_scores(mols, data):
        """Boolean array: valid AND SMILES not present in ``data.smiles``."""
        return np.array(
            list(map(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols)))
    @staticmethod
    def novel_filter(mols, data):
        """Return molecules that are valid and unseen in the dataset."""
        return list(filter(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols))
    @staticmethod
    def novel_total_score(mols, data):
        """Fraction of the *valid* molecules that are novel."""
        return MolecularMetrics.novel_scores(MolecularMetrics.valid_filter(mols), data).mean()
    @staticmethod
    def unique_scores(mols):
        # Score per molecule: 0.75 + 1/duplicate-count, clipped to [0, 1];
        # invalid molecules (empty SMILES) score 0.75.
        smiles = list(map(lambda x: Chem.MolToSmiles(x) if MolecularMetrics.valid_lambda(x) else '', mols))
        return np.clip(
            0.75 + np.array(list(map(lambda x: 1 / smiles.count(x) if x != '' else 0, smiles)), dtype=np.float32), 0, 1)
    @staticmethod
    def unique_total_score(mols):
        """Fraction of valid molecules with a distinct SMILES string."""
        v = MolecularMetrics.valid_filter(mols)
        s = set(map(lambda x: Chem.MolToSmiles(x), v))
        return 0 if len(v) == 0 else len(s) / len(v)
    # @staticmethod
    # def novel_and_unique_total_score(mols, data):
    #     return ((MolecularMetrics.unique_scores(mols) == 1).astype(float) * MolecularMetrics.novel_scores(mols,
    #                                                                                                       data)).sum()
    #
    # @staticmethod
    # def reconstruction_scores(data, model, session, sample=False):
    #
    #     m0, _, _, a, x, _, f, _, _ = data.next_validation_batch()
    #     feed_dict = {model.edges_labels: a, model.nodes_labels: x, model.node_features: f, model.training: False}
    #
    #     try:
    #         feed_dict.update({model.variational: False})
    #     except AttributeError:
    #         pass
    #
    #     n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
    #         model.nodes_argmax, model.edges_argmax], feed_dict=feed_dict)
    #
    #     n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
    #
    #     m1 = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]
    #
    #     return np.mean([float(Chem.MolToSmiles(m0_) == Chem.MolToSmiles(m1_)) if m1_ is not None else 0
    #                     for m0_, m1_ in zip(m0, m1)])
    @staticmethod
    def natural_product_scores(mols, norm=False):
        """Natural-product likeness from Morgan fingerprint bit scores.

        Invalid molecules score the minimum (-4). With ``norm=True`` the
        scores are remapped from [-3, 1] and clipped into [0, 1].
        """
        # calculating the score: mean NP contribution of the fingerprint bits
        scores = [sum(NP_model.get(bit, 0)
                      for bit in Chem.rdMolDescriptors.GetMorganFingerprint(mol,
                                                                            2).GetNonzeroElements()) / float(
            mol.GetNumAtoms()) if mol is not None else None
                  for mol in mols]
        # preventing score explosion for exotic molecules: log-compress
        # anything outside [-4, 4]
        scores = list(map(lambda score: score if score is None else (
            4 + math.log10(score - 4 + 1) if score > 4 else (
                -4 - math.log10(-4 - score + 1) if score < -4 else score)), scores))
        scores = np.array(list(map(lambda x: -4 if x is None else x, scores)))
        scores = np.clip(MolecularMetrics.remap(scores, -3, 1), 0.0, 1.0) if norm else scores
        return scores
    @staticmethod
    def quantitative_estimation_druglikeness_scores(mols, norm=False):
        """QED drug-likeness per molecule; invalid molecules score 0.

        NOTE(review): ``norm`` is accepted but currently unused here —
        presumably kept for signature symmetry with the other metrics.
        """
        return np.array(list(map(lambda x: 0 if x is None else x, [
            MolecularMetrics._avoid_sanitization_error(lambda: QED.qed(mol)) if mol is not None else None for mol in
            mols])))
    @staticmethod
    def water_octanol_partition_coefficient_scores(mols, norm=False):
        """Crippen logP per molecule; invalid molecules score -3.

        With ``norm=True`` scores are remapped from the empirical range
        [-2.12178879609, 6.0429063424] and clipped into [0, 1].
        """
        scores = [MolecularMetrics._avoid_sanitization_error(lambda: Crippen.MolLogP(mol)) if mol is not None else None
                  for mol in mols]
        scores = np.array(list(map(lambda x: -3 if x is None else x, scores)))
        scores = np.clip(MolecularMetrics.remap(scores, -2.12178879609, 6.0429063424), 0.0, 1.0) if norm else scores
        return scores
    @staticmethod
    def _compute_SAS(mol):
        """Synthetic accessibility score (SAS) on a 1 (easy) .. 10 (hard) scale."""
        # fragment score: mean SA contribution of Morgan fingerprint bits
        fp = Chem.rdMolDescriptors.GetMorganFingerprint(mol, 2)
        fps = fp.GetNonzeroElements()
        score1 = 0.
        nf = 0
        # for bitId, v in fps.items():
        for bitId, v in fps.items():
            nf += v
            sfp = bitId
            score1 += SA_model.get(sfp, -4) * v
        score1 /= nf
        # features score
        nAtoms = mol.GetNumAtoms()
        nChiralCenters = len(Chem.FindMolChiralCenters(
            mol, includeUnassigned=True))
        ri = mol.GetRingInfo()
        nSpiro = Chem.rdMolDescriptors.CalcNumSpiroAtoms(mol)
        nBridgeheads = Chem.rdMolDescriptors.CalcNumBridgeheadAtoms(mol)
        nMacrocycles = 0
        for x in ri.AtomRings():
            if len(x) > 8:
                nMacrocycles += 1
        sizePenalty = nAtoms ** 1.005 - nAtoms
        stereoPenalty = math.log10(nChiralCenters + 1)
        spiroPenalty = math.log10(nSpiro + 1)
        bridgePenalty = math.log10(nBridgeheads + 1)
        macrocyclePenalty = 0.
        # ---------------------------------------
        # This differs from the paper, which defines:
        # macrocyclePenalty = math.log10(nMacrocycles+1)
        # This form generates better results when 2 or more macrocycles are present
        if nMacrocycles > 0:
            macrocyclePenalty = math.log10(2)
        score2 = 0. - sizePenalty - stereoPenalty - \
                 spiroPenalty - bridgePenalty - macrocyclePenalty
        # correction for the fingerprint density
        # not in the original publication, added in version 1.1
        # to make highly symmetrical molecules easier to synthetise
        score3 = 0.
        if nAtoms > len(fps):
            score3 = math.log(float(nAtoms) / len(fps)) * .5
        sascore = score1 + score2 + score3
        # need to transform "raw" value into scale between 1 and 10
        # (note: `min`/`max` shadow the builtins here; local to this function)
        min = -4.0
        max = 2.5
        sascore = 11. - (sascore - min + 1) / (max - min) * 9.
        # smooth the 10-end
        if sascore > 8.:
            sascore = 8. + math.log(sascore + 1. - 9.)
        if sascore > 10.:
            sascore = 10.0
        elif sascore < 1.:
            sascore = 1.0
        return sascore
    @staticmethod
    def synthetic_accessibility_score_scores(mols, norm=False):
        """SAS per molecule; invalid molecules get the worst score (10).

        With ``norm=True`` scores are remapped from [5, 1.5] (inverted so
        easier synthesis maps to higher values) and clipped into [0, 1].
        """
        scores = [MolecularMetrics._compute_SAS(mol) if mol is not None else None for mol in mols]
        scores = np.array(list(map(lambda x: 10 if x is None else x, scores)))
        scores = np.clip(MolecularMetrics.remap(scores, 5, 1.5), 0.0, 1.0) if norm else scores
        return scores
    @staticmethod
    def diversity_scores(mols, data):
        """Mean Tanimoto distance of each molecule to 100 random dataset
        molecules, remapped from [0.9, 0.945] and clipped into [0, 1]."""
        rand_mols = np.random.choice(data.data, 100)
        fps = [Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048) for mol in rand_mols]
        scores = np.array(
            list(map(lambda x: MolecularMetrics.__compute_diversity(x, fps) if x is not None else 0, mols)))
        scores = np.clip(MolecularMetrics.remap(scores, 0.9, 0.945), 0.0, 1.0)
        return scores
    @staticmethod
    def __compute_diversity(mol, fps):
        # Mean Tanimoto *distance* (returnDistance=True) to reference fps.
        ref_fps = Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048)
        dist = DataStructs.BulkTanimotoSimilarity(ref_fps, fps, returnDistance=True)
        score = np.mean(dist)
        return score
    @staticmethod
    def drugcandidate_scores(mols, data):
        """Composite drug-candidate score: bump-weighted logP + SAS +
        novelty (non-novel molecules get a 0.3 consolation term), averaged
        over the four contributions."""
        scores = (MolecularMetrics.constant_bump(
            MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=True), 0.210,
            0.945) + MolecularMetrics.synthetic_accessibility_score_scores(mols,
                                                                           norm=True) + MolecularMetrics.novel_scores(
            mols, data) + (1 - MolecularMetrics.novel_scores(mols, data)) * 0.3) / 4
        return scores
    @staticmethod
    def constant_bump(x, x_low, x_high, decay=0.025):
        """Plateau function: 1 inside [x_low, x_high], Gaussian falloff outside."""
        return np.select(condlist=[x <= x_low, x >= x_high],
                         choicelist=[np.exp(- (x - x_low) ** 2 / decay),
                                     np.exp(- (x - x_high) ** 2 / decay)],
                         default=np.ones_like(x))
def mols2grid_image(mols, molsPerRow):
    """Render molecules as a grid image; ``None`` entries are drawn as
    empty molecules so the grid layout is preserved."""
    drawable = []
    for entry in mols:
        mol = entry if entry is not None else Chem.RWMol()
        # 2D coordinates are required before drawing
        AllChem.Compute2DCoords(mol)
        drawable.append(mol)
    return Draw.MolsToGridImage(drawable, molsPerRow=molsPerRow, subImgSize=(150, 150))
def classification_report(data, model, session, sample=False):
    """Print sklearn classification reports and confusion matrices for the
    predicted vs. true edge (bond) and node (atom) labels on one
    validation batch.

    Parameters
    ----------
    data : dataset providing ``next_validation_batch`` and the
        ``bond_decoder_m`` / ``atom_decoder_m`` label mappings.
    model : model exposing the argmax / gumbel-argmax tensors and the
        placeholders fed below.
    session : TensorFlow session used to evaluate the tensors.
    sample : bool, optional
        When True, evaluate the Gumbel-softmax argmax tensors instead of
        the deterministic argmax tensors.
    """
    _, _, _, a, x, _, f, _, _ = data.next_validation_batch()
    n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
        model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
                                                            model.node_features: f, model.training: False,
                                                            model.variational: False})
    # collapse one-hot predictions to class indices
    n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
    # --- edge (bond type) report ---
    y_true = e.flatten()
    y_pred = a.flatten()
    target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]
    print('######## Classification Report ########\n')
    print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
                                   target_names=target_names))
    print('######## Confusion Matrix ########\n')
    print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
    # --- node (atom type) report ---
    y_true = n.flatten()
    y_pred = x.flatten()
    target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]
    print('######## Classification Report ########\n')
    print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
                                   target_names=target_names))
    print('\n######## Confusion Matrix ########\n')
    print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
def reconstructions(data, model, session, batch_dim=10, sample=False):
    """Return originals interleaved with their model reconstructions.

    Pulls one training batch, runs the model's (gumbel-)argmax node/edge
    tensors, rebuilds molecules from the predicted matrices, and returns a
    flat array alternating [original, reconstruction, original, ...].
    Failed reconstructions become empty ``Chem.RWMol()`` placeholders.
    """
    m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)
    n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
        model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
                                                            model.node_features: f, model.training: False,
                                                            model.variational: False})
    # collapse one-hot predictions to class indices
    n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
    m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)
                                                                  for n_, e_ in zip(n, e)]])
    # stack + transpose + flatten interleaves original/reconstructed pairs
    mols = np.vstack((m0, m1)).T.flatten()
    return mols
def samples(data, model, session, embeddings, sample=False):
    """Decode molecules from latent ``embeddings`` via the generator.

    When ``sample`` is True the Gumbel-softmax argmax tensors are
    evaluated; otherwise the deterministic argmax tensors are used.
    """
    if sample:
        fetches = [model.nodes_gumbel_argmax, model.edges_gumbel_argmax]
    else:
        fetches = [model.nodes_argmax, model.edges_argmax]
    feed = {model.embeddings: embeddings, model.training: False}
    nodes, edges = session.run(fetches, feed_dict=feed)
    # collapse one-hot outputs to class indices
    nodes = np.argmax(nodes, axis=-1)
    edges = np.argmax(edges, axis=-1)
    return [data.matrices2mol(n_, e_, strict=True)
            for n_, e_ in zip(nodes, edges)]
def all_scores(mols, data, norm=False, reconstruction=False):
    """Compute all molecular metrics.

    Returns a pair ``(m0, m1)`` where ``m0`` maps metric names to
    per-molecule score lists (``None`` entries dropped) and ``m1`` maps
    aggregate metric names to percentages.

    NOTE(review): ``reconstruction`` is accepted but unused; kept for
    interface compatibility with existing callers.
    """
    raw_metrics = {
        'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),
        'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),
        'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),
        'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),
        'diversity score': MolecularMetrics.diversity_scores(mols, data),
        'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data),
    }
    m0 = {name: [score for score in values if score is not None]
          for name, values in raw_metrics.items()}
    m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,
          'unique score': MolecularMetrics.unique_total_score(mols) * 100,
          'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}
    return m0, m1
| 41.087613 | 120 | 0.614632 | from sklearn.metrics import classification_report as sk_classification_report
from sklearn.metrics import confusion_matrix
import pickle
import gzip
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
import math
import numpy as np
NP_model = pickle.load(gzip.open('data/NP_score.pkl.gz'))
SA_model = {i[j]: float(i[0]) for i in pickle.load(gzip.open('data/SA_score.pkl.gz')) for j in range(1, len(i))}
class MolecularMetrics(object):
@staticmethod
def _avoid_sanitization_error(op):
try:
return op()
except ValueError:
return None
@staticmethod
def remap(x, x_min, x_max):
return (x - x_min) / (x_max - x_min)
@staticmethod
def valid_lambda(x):
return x is not None and Chem.MolToSmiles(x) != ''
@staticmethod
def valid_lambda_special(x):
s = Chem.MolToSmiles(x) if x is not None else ''
return x is not None and '*' not in s and '.' not in s and s != ''
@staticmethod
def valid_scores(mols):
return np.array(list(map(MolecularMetrics.valid_lambda_special, mols)), dtype=np.float32)
@staticmethod
def valid_filter(mols):
return list(filter(MolecularMetrics.valid_lambda, mols))
@staticmethod
def valid_total_score(mols):
return np.array(list(map(MolecularMetrics.valid_lambda, mols)), dtype=np.float32).mean()
@staticmethod
def novel_scores(mols, data):
return np.array(
list(map(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols)))
@staticmethod
def novel_filter(mols, data):
return list(filter(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols))
@staticmethod
def novel_total_score(mols, data):
return MolecularMetrics.novel_scores(MolecularMetrics.valid_filter(mols), data).mean()
@staticmethod
def unique_scores(mols):
smiles = list(map(lambda x: Chem.MolToSmiles(x) if MolecularMetrics.valid_lambda(x) else '', mols))
return np.clip(
0.75 + np.array(list(map(lambda x: 1 / smiles.count(x) if x != '' else 0, smiles)), dtype=np.float32), 0, 1)
@staticmethod
def unique_total_score(mols):
v = MolecularMetrics.valid_filter(mols)
s = set(map(lambda x: Chem.MolToSmiles(x), v))
return 0 if len(v) == 0 else len(s) / len(v)
@staticmethod
def natural_product_scores(mols, norm=False):
scores = [sum(NP_model.get(bit, 0)
for bit in Chem.rdMolDescriptors.GetMorganFingerprint(mol,
2).GetNonzeroElements()) / float(
mol.GetNumAtoms()) if mol is not None else None
for mol in mols]
scores = list(map(lambda score: score if score is None else (
4 + math.log10(score - 4 + 1) if score > 4 else (
-4 - math.log10(-4 - score + 1) if score < -4 else score)), scores))
scores = np.array(list(map(lambda x: -4 if x is None else x, scores)))
scores = np.clip(MolecularMetrics.remap(scores, -3, 1), 0.0, 1.0) if norm else scores
return scores
@staticmethod
def quantitative_estimation_druglikeness_scores(mols, norm=False):
return np.array(list(map(lambda x: 0 if x is None else x, [
MolecularMetrics._avoid_sanitization_error(lambda: QED.qed(mol)) if mol is not None else None for mol in
mols])))
@staticmethod
def water_octanol_partition_coefficient_scores(mols, norm=False):
scores = [MolecularMetrics._avoid_sanitization_error(lambda: Crippen.MolLogP(mol)) if mol is not None else None
for mol in mols]
scores = np.array(list(map(lambda x: -3 if x is None else x, scores)))
scores = np.clip(MolecularMetrics.remap(scores, -2.12178879609, 6.0429063424), 0.0, 1.0) if norm else scores
return scores
@staticmethod
def _compute_SAS(mol):
fp = Chem.rdMolDescriptors.GetMorganFingerprint(mol, 2)
fps = fp.GetNonzeroElements()
score1 = 0.
nf = 0
for bitId, v in fps.items():
nf += v
sfp = bitId
score1 += SA_model.get(sfp, -4) * v
score1 /= nf
nAtoms = mol.GetNumAtoms()
nChiralCenters = len(Chem.FindMolChiralCenters(
mol, includeUnassigned=True))
ri = mol.GetRingInfo()
nSpiro = Chem.rdMolDescriptors.CalcNumSpiroAtoms(mol)
nBridgeheads = Chem.rdMolDescriptors.CalcNumBridgeheadAtoms(mol)
nMacrocycles = 0
for x in ri.AtomRings():
if len(x) > 8:
nMacrocycles += 1
sizePenalty = nAtoms ** 1.005 - nAtoms
stereoPenalty = math.log10(nChiralCenters + 1)
spiroPenalty = math.log10(nSpiro + 1)
bridgePenalty = math.log10(nBridgeheads + 1)
macrocyclePenalty = 0.
if nMacrocycles > 0:
macrocyclePenalty = math.log10(2)
score2 = 0. - sizePenalty - stereoPenalty - \
spiroPenalty - bridgePenalty - macrocyclePenalty
score3 = 0.
if nAtoms > len(fps):
score3 = math.log(float(nAtoms) / len(fps)) * .5
sascore = score1 + score2 + score3
min = -4.0
max = 2.5
sascore = 11. - (sascore - min + 1) / (max - min) * 9.
if sascore > 8.:
sascore = 8. + math.log(sascore + 1. - 9.)
if sascore > 10.:
sascore = 10.0
elif sascore < 1.:
sascore = 1.0
return sascore
@staticmethod
def synthetic_accessibility_score_scores(mols, norm=False):
scores = [MolecularMetrics._compute_SAS(mol) if mol is not None else None for mol in mols]
scores = np.array(list(map(lambda x: 10 if x is None else x, scores)))
scores = np.clip(MolecularMetrics.remap(scores, 5, 1.5), 0.0, 1.0) if norm else scores
return scores
@staticmethod
def diversity_scores(mols, data):
rand_mols = np.random.choice(data.data, 100)
fps = [Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048) for mol in rand_mols]
scores = np.array(
list(map(lambda x: MolecularMetrics.__compute_diversity(x, fps) if x is not None else 0, mols)))
scores = np.clip(MolecularMetrics.remap(scores, 0.9, 0.945), 0.0, 1.0)
return scores
@staticmethod
def __compute_diversity(mol, fps):
ref_fps = Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048)
dist = DataStructs.BulkTanimotoSimilarity(ref_fps, fps, returnDistance=True)
score = np.mean(dist)
return score
@staticmethod
def drugcandidate_scores(mols, data):
scores = (MolecularMetrics.constant_bump(
MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=True), 0.210,
0.945) + MolecularMetrics.synthetic_accessibility_score_scores(mols,
norm=True) + MolecularMetrics.novel_scores(
mols, data) + (1 - MolecularMetrics.novel_scores(mols, data)) * 0.3) / 4
return scores
@staticmethod
def constant_bump(x, x_low, x_high, decay=0.025):
return np.select(condlist=[x <= x_low, x >= x_high],
choicelist=[np.exp(- (x - x_low) ** 2 / decay),
np.exp(- (x - x_high) ** 2 / decay)],
default=np.ones_like(x))
def mols2grid_image(mols, molsPerRow):
mols = [e if e is not None else Chem.RWMol() for e in mols]
for mol in mols:
AllChem.Compute2DCoords(mol)
return Draw.MolsToGridImage(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))
def classification_report(data, model, session, sample=False):
_, _, _, a, x, _, f, _, _ = data.next_validation_batch()
n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
model.node_features: f, model.training: False,
model.variational: False})
n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
y_true = e.flatten()
y_pred = a.flatten()
target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]
print('######## Classification Report ########\n')
print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
target_names=target_names))
print('######## Confusion Matrix ########\n')
print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
y_true = n.flatten()
y_pred = x.flatten()
target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]
print('######## Classification Report ########\n')
print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
target_names=target_names))
print('\n######## Confusion Matrix ########\n')
print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
def reconstructions(data, model, session, batch_dim=10, sample=False):
m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)
n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
model.node_features: f, model.training: False,
model.variational: False})
n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)
for n_, e_ in zip(n, e)]])
mols = np.vstack((m0, m1)).T.flatten()
return mols
def samples(data, model, session, embeddings, sample=False):
n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
model.nodes_argmax, model.edges_argmax], feed_dict={
model.embeddings: embeddings, model.training: False})
n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]
return mols
def all_scores(mols, data, norm=False, reconstruction=False):
m0 = {k: list(filter(lambda e: e is not None, v)) for k, v in {
'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),
'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),
'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),
'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),
'diversity score': MolecularMetrics.diversity_scores(mols, data),
'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data)}.items()}
m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,
'unique score': MolecularMetrics.unique_total_score(mols) * 100,
'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}
return m0, m1
| true | true |
f732ffea86e424f5d7345500e9a63580f2cb690e | 1,887 | py | Python | tools/harness-automation/cases/commissioner_8_1_1.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 3 | 2018-06-20T11:13:33.000Z | 2020-12-08T15:15:10.000Z | tools/harness-automation/cases/commissioner_8_1_1.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 2 | 2017-03-23T07:47:54.000Z | 2017-08-21T03:12:31.000Z | tools/harness-automation/cases/commissioner_8_1_1.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 3 | 2017-08-29T01:31:57.000Z | 2020-05-07T22:56:52.000Z | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_8_1_1(HarnessCase):
    """Thread Test Harness automation for case 8.1.1, run in the
    Commissioner role with one golden device."""
    role = HarnessCase.ROLE_COMMISSIONER
    case = '8 1 1'
    golden_devices_required = 1
    def on_dialog(self, dialog, title):
        # No harness dialog interaction is needed for this case.
        pass
if __name__ == '__main__':
unittest.main()
| 42.886364 | 77 | 0.772655 |
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_8_1_1(HarnessCase):
role = HarnessCase.ROLE_COMMISSIONER
case = '8 1 1'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f7330023a31679d2fbb1d500dfb418d3b17ea11b | 112,294 | py | Python | keepa/interface.py | akaszynski/keepa | ffc35edc2f7a4601408b0f0a22a8856be88dcb3e | [
"Apache-2.0"
] | 110 | 2019-02-27T20:28:16.000Z | 2022-03-25T12:58:14.000Z | keepa/interface.py | akaszynski/keepa | ffc35edc2f7a4601408b0f0a22a8856be88dcb3e | [
"Apache-2.0"
] | 74 | 2019-02-23T14:31:10.000Z | 2022-03-25T02:55:06.000Z | keepa/interface.py | akaszynski/keepa | ffc35edc2f7a4601408b0f0a22a8856be88dcb3e | [
"Apache-2.0"
] | 50 | 2019-02-26T21:17:37.000Z | 2022-03-24T02:42:36.000Z | """Interface module to download Amazon product and history data from
keepa.com
"""
import requests
import asyncio
import datetime
import json
import logging
import time
from functools import wraps
import aiohttp
import numpy as np
import pandas as pd
from tqdm import tqdm
from keepa.query_keys import DEAL_REQUEST_KEYS, PRODUCT_REQUEST_KEYS
def is_documented_by(original):
    """Decorator factory that copies ``original``'s docstring onto the
    decorated function, avoiding duplicated documentation text."""
    def decorator(func):
        func.__doc__ = original.__doc__
        return func
    return decorator
log = logging.getLogger(__name__)
log.setLevel('DEBUG')
# hardcoded ordinal time from
KEEPA_ST_ORDINAL = np.datetime64('2011-01-01')
# Request limit
REQUEST_LIMIT = 100
# Status code dictionary/key
SCODES = {'400': 'REQUEST_REJECTED',
'402': 'PAYMENT_REQUIRED',
'405': 'METHOD_NOT_ALLOWED',
'429': 'NOT_ENOUGH_TOKEN'}
# domain codes
# Valid values: [ 1: com | 2: co.uk | 3: de | 4: fr | 5:
# co.jp | 6: ca | 7: cn | 8: it | 9: es | 10: in | 11: com.mx ]
DCODES = ['RESERVED', 'US', 'GB', 'DE', 'FR', 'JP', 'CA', 'CN', 'IT', 'ES',
'IN', 'MX']
# csv indices. used when parsing csv and stats fields.
# https://github.com/keepacom/api_backend
# see api_backend/src/main/java/com/keepa/api/backend/structs/Product.java
# [index in csv, key name, isfloat(is price or rating)]
csv_indices = [[0, 'AMAZON', True],
[1, 'NEW', True],
[2, 'USED', True],
[3, 'SALES', False],
[4, 'LISTPRICE', True],
[5, 'COLLECTIBLE', True],
[6, 'REFURBISHED', True],
[7, 'NEW_FBM_SHIPPING', True],
[8, 'LIGHTNING_DEAL', True],
[9, 'WAREHOUSE', True],
[10, 'NEW_FBA', True],
[11, 'COUNT_NEW', False],
[12, 'COUNT_USED', False],
[13, 'COUNT_REFURBISHED', False],
[14, 'CollectableOffers', False],
[15, 'EXTRA_INFO_UPDATES', False],
[16, 'RATING', True],
[17, 'COUNT_REVIEWS', False],
[18, 'BUY_BOX_SHIPPING', True],
[19, 'USED_NEW_SHIPPING', True],
[20, 'USED_VERY_GOOD_SHIPPING', True],
[21, 'USED_GOOD_SHIPPING', True],
[22, 'USED_ACCEPTABLE_SHIPPING', True],
[23, 'COLLECTIBLE_NEW_SHIPPING', True],
[24, 'COLLECTIBLE_VERY_GOOD_SHIPPING', True],
[25, 'COLLECTIBLE_GOOD_SHIPPING', True],
[26, 'COLLECTIBLE_ACCEPTABLE_SHIPPING', True],
[27, 'REFURBISHED_SHIPPING', True],
[28, 'EBAY_NEW_SHIPPING', True],
[29, 'EBAY_USED_SHIPPING', True],
[30, 'TRADE_IN', True],
[31, 'RENT', False]]
def _parse_stats(stats, to_datetime):
    """Parse the numeric fields of a keepa "stats" response object.

    String and list-of-string fields are left untouched (dropped from the
    output via the not-required set below). Negative integers (-1/-2 =
    "does not exist") are discarded; price/rating lists indexed by
    ``csv_indices`` are converted to named, scaled values, optionally with
    keepa minutes converted to datetimes.

    Keepa stats object documentation:
    https://keepa.com/#!discuss/t/statistics-object/1308
    """
    # Fields that are strings / lists of strings — no numeric parsing needed.
    stats_keys_parse_not_required = {
        'buyBoxSellerId',
        'sellerIdsLowestFBA',
        'sellerIdsLowestFBM',
        'buyBoxShippingCountry',
        'buyBoxAvailabilityMessage',
    }
    stats_parsed = {}
    for stat_key, stat_value in stats.items():
        if stat_key in stats_keys_parse_not_required:
            stat_value = None
        elif isinstance(stat_value, int) and stat_value < 0:  # -1 or -2 means not exist. 0 doesn't mean not exist.
            stat_value = None
        if stat_value is not None:
            if stat_key == 'lastOffersUpdate':
                # single keepa-minutes timestamp
                stats_parsed[stat_key] = keepa_minutes_to_time([stat_value], to_datetime)[0]
            elif isinstance(stat_value, list) and len(stat_value) > 0:
                stat_value_dict = {}
                # Some stats carry [time, value] pairs per csv index instead
                # of bare values; detect that once for the whole list.
                convert_time_in_value_pair = any(map(lambda v: v is not None and isinstance(v, list), stat_value))
                for ind, key, isfloat in csv_indices:
                    stat_value_item = stat_value[ind] if ind < len(stat_value) else None
                    def normalize_value(v):
                        # Negative = not available; prices are cents -> dollars;
                        # ratings are stored as 0-50 -> scale differs (x10 after /100).
                        if v < 0:
                            return None
                        if isfloat:
                            v = float(v) / 100
                        if key == 'RATING':
                            v = v * 10
                        return v
                    if stat_value_item is not None:
                        if convert_time_in_value_pair:
                            stat_value_time, stat_value_item = stat_value_item
                            stat_value_item = normalize_value(stat_value_item)
                            if stat_value_item is not None:
                                stat_value_time = keepa_minutes_to_time([stat_value_time], to_datetime)[0]
                                stat_value_item = (stat_value_time, stat_value_item)
                        else:
                            stat_value_item = normalize_value(stat_value_item)
                        if stat_value_item is not None:
                            stat_value_dict[key] = stat_value_item
                if len(stat_value_dict) > 0:
                    stats_parsed[stat_key] = stat_value_dict
            else:
                # plain scalar (or untouched non-numeric) value
                stats_parsed[stat_key] = stat_value
    return stats_parsed
_seller_time_data_keys = ['trackedSince', 'lastUpdate']
def _parse_seller(seller_raw_response, to_datetime):
    """Convert seller time fields from keepa minutes and key sellers by id.

    Each seller's ``trackedSince`` / ``lastUpdate`` fields (when present)
    are converted via ``keepa_minutes_to_time``; the result is a dict
    mapping ``sellerId`` to the (mutated) seller record.
    """
    sellers = list(seller_raw_response.values())
    for seller in sellers:
        for field in _seller_time_data_keys:
            raw_minutes = seller.get(field, None)
            if raw_minutes is not None:
                seller[field] = keepa_minutes_to_time([raw_minutes], to_datetime)[0]
    return {seller['sellerId']: seller for seller in sellers}
def parse_csv(csv, to_datetime=True, out_of_stock_as_nan=True):
    """Parses csv list from keepa into a python dictionary.

    Parameters
    ----------
    csv : list
        csv list from keepa

    to_datetime : bool, optional
        Modifies numpy minutes to datetime.datetime values.
        Default True.

    out_of_stock_as_nan : bool, optional
        When True, prices are NAN when price category is out of stock.
        When False, prices are -0.01
        Default True

    Returns
    -------
    product_data : dict
        Dictionary containing the following fields with timestamps:

        AMAZON: Amazon price history

        NEW: Marketplace/3rd party New price history - Amazon is
            considered to be part of the marketplace as well, so if
            Amazon has the overall lowest new (!) price, the
            marketplace new price in the corresponding time interval
            will be identical to the Amazon price (except if there is
            only one marketplace offer).  Shipping and Handling costs
            not included!

        USED: Marketplace/3rd party Used price history

        SALES: Sales Rank history. Not every product has a Sales Rank.

        LISTPRICE: List Price history

        COLLECTIBLE: Collectible Price history

        REFURBISHED: Refurbished Price history

        NEW_FBM_SHIPPING: 3rd party (not including Amazon) New price
            history including shipping costs, only fulfilled by
            merchant (FBM).

        LIGHTNING_DEAL: 3rd party (not including Amazon) New price
            history including shipping costs, only fulfilled by
            merchant (FBM).

        WAREHOUSE: Amazon Warehouse Deals price history. Mostly of
            used condition, rarely new.

        NEW_FBA: Price history of the lowest 3rd party (not
            including Amazon/Warehouse) New offer that is fulfilled
            by Amazon

        COUNT_NEW: New offer count history

        COUNT_USED: Used offer count history

        COUNT_REFURBISHED: Refurbished offer count history

        COUNT_COLLECTIBLE: Collectible offer count history

        RATING: The product's rating history. A rating is an
            integer from 0 to 50 (e.g. 45 = 4.5 stars)

        COUNT_REVIEWS: The product's review count history.

        BUY_BOX_SHIPPING: The price history of the buy box. If no
            offer qualified for the buy box the price has the value
            -1. Including shipping costs.  The ``buybox`` parameter
            must be True for this field to be in the data.

        USED_NEW_SHIPPING: "Used - Like New" price history
            including shipping costs.

        USED_VERY_GOOD_SHIPPING: "Used - Very Good" price history
            including shipping costs.

        USED_GOOD_SHIPPING: "Used - Good" price history including
            shipping costs.

        USED_ACCEPTABLE_SHIPPING: "Used - Acceptable" price history
            including shipping costs.

        COLLECTIBLE_NEW_SHIPPING: "Collectible - Like New" price
            history including shipping costs.

        COLLECTIBLE_VERY_GOOD_SHIPPING: "Collectible - Very Good"
            price history including shipping costs.

        COLLECTIBLE_GOOD_SHIPPING: "Collectible - Good" price
            history including shipping costs.

        COLLECTIBLE_ACCEPTABLE_SHIPPING: "Collectible - Acceptable"
            price history including shipping costs.

        REFURBISHED_SHIPPING: Refurbished price history including
            shipping costs.

        TRADE_IN: The trade in price history. Amazon trade-in is
            not available for every locale.

        RENT: Rental price history. Requires use of the rental
            and offers parameter. Amazon Rental is only available
            for Amazon US.

    Notes
    -----
    Negative prices indicate "no data"/"out of stock"; they become
    ``np.nan`` when ``out_of_stock_as_nan`` is True.
    """
    product_data = {}

    for ind, key, isfloat in csv_indices:
        # Guard against short csv lists in addition to empty entries.
        if ind < len(csv) and csv[ind]:
            if 'SHIPPING' in key:  # shipping price is included
                # Data goes [time0, value0, shipping0, time1, value1,
                # shipping1, ...]
                times = csv[ind][::3]
                values = np.array(csv[ind][1::3])
                values += np.array(csv[ind][2::3])
            else:
                # Data goes [time0, value0, time1, value1, ...]
                times = csv[ind][::2]
                values = np.array(csv[ind][1::2])

            # Convert to float price if applicable.  Negative raw values
            # flag missing/out-of-stock data points.
            if isfloat:
                nan_mask = values < 0
                # NOTE: np.float was removed in numpy >= 1.24; the
                # builtin float is the documented replacement.
                values = values.astype(float) / 100
                if out_of_stock_as_nan:
                    values[nan_mask] = np.nan

                if key == 'RATING':
                    # Ratings are stored as 0-50; scale to 0-5 stars * 100.
                    values *= 10

            timeval = keepa_minutes_to_time(times, to_datetime)
            product_data['%s_time' % key] = timeval
            product_data[key] = values

            # combine time and value into a data frame using time as index
            product_data['df_%s' % key] = pd.DataFrame({'value': values},
                                                       index=timeval)

    return product_data
def format_items(items):
    """Validate product codes and return them as a unique numpy array.

    Parameters
    ----------
    items : str, list, tuple, or np.ndarray
        A single product code or a sequence of product codes.

    Returns
    -------
    np.ndarray
        Unique product codes (single-element array for a string input).

    Raises
    ------
    TypeError
        If ``items`` is not a string or a supported sequence.  The
        original implementation silently returned ``None`` here, which
        produced a confusing ``TypeError`` later in ``Keepa.query``.
    """
    if isinstance(items, (list, tuple, np.ndarray)):
        return np.unique(items)
    if isinstance(items, str):
        return np.asarray([items])
    raise TypeError('items must be a str, list, tuple, or np.ndarray')
class Keepa():
"""Support a synchronous Python interface to keepa server.
Initializes API with access key. Access key can be obtained by
signing up for a reoccurring or one time plan at:
https://keepa.com/#!api
Parameters
----------
accesskey : str
64 character access key string.
timeout : float, optional
Default timeout when issuing any request. This is not a time
limit on the entire response download; rather, an exception is
raised if the server has not issued a response for timeout
seconds. Setting this to 0 disables the timeout, but will
cause any request to hang indefiantly should keepa.com be down
Examples
--------
Create the api object
>>> import keepa
>>> mykey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
>>> api = keepa.Keepa(mykey)
Request data from two ASINs
>>> products = api.query(['0439064872', '1426208081'])
Print item details
>>> print('Item 1')
>>> print('\t ASIN: {:s}'.format(products[0]['asin']))
>>> print('\t Title: {:s}'.format(products[0]['title']))
Print item price
>>> usedprice = products[0]['data']['MarketplaceUsed']
>>> usedtimes = products[0]['data']['MarketplaceUsed_time']
>>> print('\t Used price: ${:.2f}'.format(usedprice[-1]))
>>> print('\t as of: {:s}'.format(str(usedtimes[-1])))
"""
def __init__(self, accesskey, timeout=10):
self.accesskey = accesskey
self.status = None
self.tokens_left = 0
self._timeout = timeout
# Store user's available tokens
log.info('Connecting to keepa using key ending in %s', accesskey[-6:])
self.update_status()
log.info('%d tokens remain', self.tokens_left)
@property
def time_to_refill(self):
""" Returns the time to refill in seconds """
# Get current timestamp in milliseconds from UNIX epoch
now = int(time.time() * 1000)
timeatrefile = self.status['timestamp'] + self.status['refillIn']
# wait plus one second fudge factor
timetorefil = timeatrefile - now + 1000
if timetorefil < 0:
timetorefil = 0
# Account for negative tokens left
if self.tokens_left < 0:
timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000
# Return value in seconds
return timetorefil / 1000.0
def update_status(self):
""" Updates available tokens """
self.status = self._request('token', {'key': self.accesskey}, wait=False)
def wait_for_tokens(self):
"""Checks any remaining tokens and waits if none are available. """
self.update_status()
# Wait if no tokens available
if self.tokens_left <= 0:
tdelay = self.time_to_refill
log.warning('Waiting %.0f seconds for additional tokens' % tdelay)
time.sleep(tdelay)
self.update_status()
def query(self, items, stats=None, domain='US', history=True,
offers=None, update=None, to_datetime=True,
rating=False, out_of_stock_as_nan=True, stock=False,
product_code_is_asin=True, progress_bar=True, buybox=False,
wait=True, days=None, only_live_offers=None, raw=False):
"""Performs a product query of a list, array, or single ASIN.
Returns a list of product data with one entry for each
product.
Parameters
----------
items : str, list, np.ndarray
A list, array, or single asin, UPC, EAN, or ISBN-13
identifying a product. ASINs should be 10 characters and
match a product on Amazon. Items not matching Amazon
product or duplicate Items will return no data. When
using non-ASIN items, set product_code_is_asin to False
stats : int or date, optional
No extra token cost. If specified the product object will
have a stats field with quick access to current prices,
min/max prices and the weighted mean values. If the offers
parameter was used it will also provide stock counts and
buy box information.
You can provide the stats parameter in two forms:
Last x days (positive integer value): calculates the stats
of the last x days, where x is the value of the stats
parameter. Interval: You can provide a date range for the
stats calculation. You can specify the range via two
timestamps (unix epoch time milliseconds) or two date
strings (ISO8601, with or without time in UTC).
domain : str, optional
One of the following Amazon domains: RESERVED, US, GB, DE,
FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.
offers : int, optional
Adds available offers to product data. Default 0. Must
be between 20 and 100.
update : int, optional
if data is older than the input integer, keepa will
update their database and return live data. If set to 0
(live data), request may cost an additional token.
Default None
history : bool, optional
When set to True includes the price, sales, and offer
history of a product. Set to False to reduce request time
if data is not required. Default True
rating : bool, optional
When set to to True, includes the existing RATING and
COUNT_REVIEWS history of the csv field. Default False
to_datetime : bool, optional
Modifies numpy minutes to datetime.datetime values.
Default True.
out_of_stock_as_nan : bool, optional
When True, prices are NAN when price category is out of
stock. When False, prices are -0.01 Default True
stock : bool, optional
Can only be used if the offers parameter is also True. If
True, the stock will be collected for all retrieved live
offers. Note: We can only determine stock up 10 qty. Stock
retrieval takes additional time, expect the request to
take longer. Existing stock history will be included
whether or not the stock parameter is used.
product_code_is_asin : bool, optional
The type of product code you are requesting. True when
product code is an ASIN, an Amazon standard identification
number, or 'code', for UPC, EAN, or ISBN-13 codes.
progress_bar : bool, optional
Display a progress bar using ``tqdm``. Defaults to
``True``.
buybox : bool, optional
Additional token cost: 2 per product). When true the
product and statistics object will include all available
buy box related data:
- current price, price history, and statistical values
- buyBoxSellerIdHistory
- all buy box fields in the statistics object
The buybox parameter
does not trigger a fresh data collection. If the offers
parameter is used the buybox parameter is ignored, as the
offers parameter also provides access to all buy box
related data. To access the statistics object the stats
parameter is required.
wait : bool, optional
Wait available token before doing effective query,
Defaults to ``True``.
only_live_offers : bool, optional
If set to True, the product object will only include live
marketplace offers (when used in combination with the
offers parameter). If you do not need historical offers
use this to have them removed from the response. This can
improve processing time and considerably decrease the size
of the response. Default None
days : int, optional
Any positive integer value. If specified and has positive
value X the product object will limit all historical data
to the recent X days. This includes the csv,
buyBoxSellerIdHistory, salesRanks, offers and
offers.offerCSV fields. If you do not need old historical
data use this to have it removed from the response. This
can improve processing time and considerably decrease the
size of the response. The parameter does not use calendar
days - so 1 day equals the last 24 hours. The oldest data
point of each field may have a date value which is out of
the specified range. This means the value of the field has
not changed since that date and is still active. Default
``None``
raw : bool, optional
When ``True``, return the raw request response. This is
only available in the non-async class.
Returns
-------
list
List of products when ``raw=False``. Each product
within the list is a dictionary. The keys of each item
may vary, so see the keys within each product for further
details.
Each product should contain at a minimum a "data" key
containing a formatted dictionary. For the available
fields see the notes section
When ``raw=True``, a list of unparsed responses are
returned as :class:`requests.models.Response`.
See: https://keepa.com/#!discuss/t/product-object/116
Notes
-----
The following are data fields a product dictionary
AMAZON
Amazon price history
NEW
Marketplace/3rd party New price history - Amazon is
considered to be part of the marketplace as well, so if
Amazon has the overall lowest new (!) price, the
marketplace new price in the corresponding time interval
will be identical to the Amazon price (except if there is
only one marketplace offer). Shipping and Handling costs
not included!
USED
Marketplace/3rd party Used price history
SALES
Sales Rank history. Not every product has a Sales Rank.
LISTPRICE
List Price history
COLLECTIBLE
Collectible Price history
REFURBISHED
Refurbished Price history
NEW_FBM_SHIPPING
3rd party (not including Amazon) New price history
including shipping costs, only fulfilled by merchant
(FBM).
LIGHTNING_DEAL
3rd party (not including Amazon) New price history
including shipping costs, only fulfilled by merchant
(FBM).
WAREHOUSE
Amazon Warehouse Deals price history. Mostly of used
condition, rarely new.
NEW_FBA
Price history of the lowest 3rd party (not including
Amazon/Warehouse) New offer that is fulfilled by Amazon
COUNT_NEW
New offer count history
COUNT_USED
Used offer count history
COUNT_REFURBISHED
Refurbished offer count history
COUNT_COLLECTIBLE
Collectible offer count history
RATING
The product's rating history. A rating is an integer from
0 to 50 (e.g. 45 = 4.5 stars)
COUNT_REVIEWS
The product's review count history.
BUY_BOX_SHIPPING
The price history of the buy box. If no offer qualified
for the buy box the price has the value -1. Including
shipping costs.
USED_NEW_SHIPPING
"Used - Like New" price history including shipping costs.
USED_VERY_GOOD_SHIPPING
"Used - Very Good" price history including shipping costs.
USED_GOOD_SHIPPING
"Used - Good" price history including shipping costs.
USED_ACCEPTABLE_SHIPPING
"Used - Acceptable" price history including shipping costs.
COLLECTIBLE_NEW_SHIPPING
"Collectible - Like New" price history including shipping
costs.
COLLECTIBLE_VERY_GOOD_SHIPPING
"Collectible - Very Good" price history including shipping
costs.
COLLECTIBLE_GOOD_SHIPPING
"Collectible - Good" price history including shipping
costs.
COLLECTIBLE_ACCEPTABLE_SHIPPING
"Collectible - Acceptable" price history including
shipping costs.
REFURBISHED_SHIPPING
Refurbished price history including shipping costs.
TRADE_IN
The trade in price history. Amazon trade-in is not
available for every locale.
BUY_BOX_SHIPPING
The price history of the buy box. If no offer qualified
for the buy box the price has the value -1. Including
shipping costs. The ``buybox`` parameter must be True for
this field to be in the data.
"""
# Format items into numpy array
try:
items = format_items(items)
except BaseException:
raise Exception('Invalid product codes input')
assert len(items), 'No valid product codes'
nitems = len(items)
if nitems == 1:
log.debug('Executing single product query')
else:
log.debug('Executing %d item product query', nitems)
# check offer input
if offers:
if not isinstance(offers, int):
raise TypeError('Parameter "offers" must be an interger')
if offers > 100 or offers < 20:
raise ValueError('Parameter "offers" must be between 20 and 100')
# Report time to completion
tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (
60000 - self.status['refillIn']) / 60000.0
if tcomplete < 0.0:
tcomplete = 0.5
log.debug('Estimated time to complete %d request(s) is %.2f minutes',
nitems, tcomplete)
log.debug('\twith a refill rate of %d token(s) per minute',
self.status['refillRate'])
# product list
products = []
pbar = None
if progress_bar:
pbar = tqdm(total=nitems)
# Number of requests is dependent on the number of items and
# request limit. Use available tokens first
idx = 0 # or number complete
while idx < nitems:
nrequest = nitems - idx
# cap request
if nrequest > REQUEST_LIMIT:
nrequest = REQUEST_LIMIT
# request from keepa and increment current position
item_request = items[idx:idx + nrequest]
response = self._product_query(
item_request,
product_code_is_asin,
stats=stats,
domain=domain, stock=stock,
offers=offers, update=update,
history=history, rating=rating,
to_datetime=to_datetime,
out_of_stock_as_nan=out_of_stock_as_nan,
buybox=buybox,
wait=wait,
days=days,
only_live_offers=only_live_offers,
raw=raw,
)
idx += nrequest
if raw:
products.append(response)
else:
products.extend(response['products'])
if pbar is not None:
pbar.update(nrequest)
return products
    def _product_query(self, items, product_code_is_asin=True, **kwargs):
        """Sends query to keepa server and returns parsed JSON result.

        The incoming ``kwargs`` are mutated in place into the HTTP
        query parameters: booleans become 0/1, ``None``-valued optional
        parameters are deleted so they are not sent, and local-only
        options (``out_of_stock_as_nan``, ``to_datetime``, ``wait``,
        ``raw``) are popped off before the request.

        Parameters
        ----------
        items : np.ndarray
            Array of asins.  If UPC, EAN, or ISBN-13,
            product_code_is_asin must be False.  Must be between 1 and
            100 codes.
        product_code_is_asin : bool, optional
            Interpret product codes as ASINs only; otherwise queries
            using the "code" key.
        stats : int or date format
            Set the stats time for get sales rank inside this range.
        domain : str
            One of the following Amazon domains:
            RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX
        offers : bool, optional
            Adds product offers to product data.
        update : int, optional
            If data is older than the input integer, keepa will update
            their database and return live data.  If set to 0 (live
            data), then request may cost an additional token.
        history : bool, optional
            When set to True includes the price, sales, and offer
            history of a product.  Set to False to reduce request time
            if data is not required.

        Returns
        -------
        products : list
            List of products.  Length equal to number of successful
            ASINs.
        refillIn : float
            Time in milliseconds to the next refill of tokens.
        refilRate : float
            Number of tokens refilled per minute
        timestamp : float
        tokensLeft : int
            Remaining tokens
        tz : int
            Timezone.  0 is UTC
        """
        # ASINs convert to comma joined string
        assert len(items) <= 100
        if product_code_is_asin:
            kwargs['asin'] = ','.join(items)
        else:
            kwargs['code'] = ','.join(items)

        # Translate the domain string to keepa's integer domain code.
        kwargs['key'] = self.accesskey
        kwargs['domain'] = DCODES.index(kwargs['domain'])

        # Convert bool values to 0 and 1.
        kwargs['stock'] = int(kwargs['stock'])
        kwargs['history'] = int(kwargs['history'])
        kwargs['rating'] = int(kwargs['rating'])
        kwargs['buybox'] = int(kwargs['buybox'])

        # Optional parameters set to None are removed entirely so the
        # keepa endpoint never sees them.
        if kwargs['update'] is None:
            del kwargs['update']
        else:
            kwargs['update'] = int(kwargs['update'])

        if kwargs['offers'] is None:
            del kwargs['offers']
        else:
            kwargs['offers'] = int(kwargs['offers'])

        if kwargs['only_live_offers'] is None:
            del kwargs['only_live_offers']
        else:
            kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))
            # Keepa's param actually doesn't use snake_case.
            # I believe using snake case throughout the Keepa interface is better.

        if kwargs['days'] is None:
            del kwargs['days']
        else:
            assert kwargs['days'] > 0

        if kwargs['stats'] is None:
            del kwargs['stats']

        # Local-only options must not be forwarded as HTTP parameters.
        out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)
        to_datetime = kwargs.pop('to_datetime', True)

        # Query and replace csv with parsed data if history enabled
        wait = kwargs.get("wait")
        kwargs.pop("wait", None)
        raw_response = kwargs.pop('raw', False)
        response = self._request('product', kwargs, wait=wait,
                                 raw_response=raw_response)
        if kwargs['history'] and not raw_response:
            for product in response['products']:
                if product['csv']:  # if data exists
                    product['data'] = parse_csv(product['csv'],
                                                to_datetime,
                                                out_of_stock_as_nan)

        # Attach a parsed copy of the stats object when stats were requested.
        if kwargs.get('stats', None) and not raw_response:
            for product in response['products']:
                stats = product.get('stats', None)
                if stats:
                    product['stats_parsed'] = _parse_stats(stats, to_datetime)

        return response
def best_sellers_query(self, category, rank_avg_range=0, domain='US', wait=True):
"""
Retrieve an ASIN list of the most popular products based on
sales in a specific category or product group. See
"search_for_categories" for information on how to get a
category.
Root category lists (e.g. "Home & Kitchen") or product group
lists contain up to 100,000 ASINs.
Sub-category lists (e.g. "Home Entertainment Furniture")
contain up to 3,000 ASINs. As we only have access to the
product's primary sales rank and not the ones of all
categories it is listed in, the sub-category lists are created
by us based on the product's primary sales rank and do not
reflect the actual ordering on Amazon.
Lists are ordered, starting with the best selling product.
Lists are updated daily. If a product does not have an
accessible sales rank it will not be included in the
lists. This in particular affects many products in the
Clothing and Sports & Outdoors categories.
We can not correctly identify the sales rank reference
category in all cases, so some products may be misplaced.
Parameters
----------
category : str
The category node id of the category you want to request
the best sellers list for. You can find category node ids
via the category search "search_for_categories"
domain : str
Amazon locale you want to access. Must be one of the following
RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX
Default US
wait : bool, optional
Wait available token before doing effective query.
Defaults to ``True``.
Returns
-------
best_sellers : list
List of best seller ASINs
"""
assert domain in DCODES, 'Invalid domain code'
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'category': category,
'range': rank_avg_range}
response = self._request('bestsellers', payload, wait=wait)
if 'bestSellersList' in response:
return response['bestSellersList']['asinList']
else: # pragma: no cover
log.info('Best sellers search results not yet available')
def search_for_categories(self, searchterm, domain='US', wait=True):
"""Searches for categories from Amazon.
Parameters
----------
searchterm : str
Input search term.
wait : bool, optional
Wait available token before doing effective query.
Defaults to ``True``.
Returns
-------
categories : list
The response contains a categories list with all matching
categories.
Examples
--------
Print all categories from science
>>> categories = api.search_for_categories('science')
>>> for cat_id in categories:
>>> print(cat_id, categories[cat_id]['name'])
"""
assert domain in DCODES, 'Invalid domain code'
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'type': 'category',
'term': searchterm}
response = self._request('search', payload, wait=wait)
if response['categories'] == {}: # pragma no cover
raise Exception('Categories search results not yet available ' +
'or no search terms found.')
else:
return response['categories']
def category_lookup(self, category_id, domain='US',
include_parents=0, wait=True):
"""
Return root categories given a categoryId.
Parameters
----------
category_id : int
ID for specific category or 0 to return a list of root
categories.
domain : str
Amazon locale you want to access. Must be one of the following
RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX
Default US
include_parents : int
Include parents.
wait : bool, optional
Wait available token before doing effective query.
Defaults to ``True``.
Returns
-------
categories : list
Output format is the same as search_for_categories.
Examples
--------
Use 0 to return all root categories
>>> categories = api.category_lookup(0)
Print all root categories
>>> for cat_id in categories:
>>> print(cat_id, categories[cat_id]['name'])
"""
assert domain in DCODES, 'Invalid domain code'
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'category': category_id,
'parents': include_parents}
response = self._request('category', payload, wait=wait)
if response['categories'] == {}: # pragma no cover
raise Exception('Category lookup results not yet available or no' +
'match found.')
else:
return response['categories']
def seller_query(self, seller_id, domain='US', to_datetime=True,
storefront=False, update=None, wait=True):
"""Receives seller information for a given seller id. If a
seller is not found no tokens will be consumed.
Token cost: 1 per requested seller
Parameters
----------
seller_id : str or list
The seller id of the merchant you want to request. For
batch requests, you may submit a list of 100 seller_ids.
The seller id can also be found on Amazon on seller
profile pages in the seller parameter of the URL as well
as in the offers results from a product query.
domain : str, optional
One of the following Amazon domains: RESERVED, US, GB, DE,
FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.
storefront : bool, optional
If specified the seller object will contain additional
information about what items the seller is listing on Amazon.
This includes a list of ASINs as well as the total amount of
items the seller has listed. The following seller object
fields will be set if data is available: asinList,
asinListLastSeen, totalStorefrontAsinsCSV. If no data is
available no additional tokens will be consumed. The ASIN
list can contain up to 100,000 items. As using the storefront
parameter does not trigger any new collection it does not
increase the processing time of the request, though the
response may be much bigger in size. The total storefront
ASIN count will not be updated, only historical data will
be provided (when available).
update : int, optional
Positive integer value. If the last live data collection from
the Amazon storefront page is older than update hours force a
new collection. Use this parameter in conjunction with the
storefront parameter. Token cost will only be applied if a new
collection is triggered.
Using this parameter you can achieve the following:
- Retrieve data from Amazon: a storefront ASIN list
containing up to 2,400 ASINs, in addition to all ASINs
already collected through our database.
- Force a refresh: Always retrieve live data with the
value 0.
- Retrieve the total number of listings of this seller:
the totalStorefrontAsinsCSV field of the seller object
will be updated.
wait : bool, optional
Wait available token before doing effective query.
Defaults to ``True``.
Returns
-------
seller_info : dict
Dictionary containing one entry per input ``seller_id``.
Examples
--------
>>> seller_info = api.seller_query('A2L77EE7U53NWQ', 'US')
Notes
-----
Seller data is not available for Amazon China.
"""
if isinstance(seller_id, list):
if len(seller_id) > 100:
err_str = 'seller_id can contain at maximum 100 sellers'
raise RuntimeError(err_str)
seller = ','.join(seller_id)
else:
seller = seller_id
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'seller': seller}
if storefront:
payload["storefront"] = int(storefront)
if update:
payload["update"] = update
response = self._request('seller', payload, wait=wait)
return _parse_seller(response['sellers'], to_datetime)
def product_finder(self, product_parms, domain='US', wait=True):
"""Query the keepa product database to find products matching
your criteria. Almost all product fields can be searched for
and sorted by.
Parameters
----------
product_parms : dict
Dictionary containing one or more of the following keys:
- ``'author': str``
- ``'availabilityAmazon': int``
- ``'avg180_AMAZON_lte': int``
- ``'avg180_AMAZON_gte': int``
- ``'avg180_BUY_BOX_SHIPPING_lte': int``
- ``'avg180_BUY_BOX_SHIPPING_gte': int``
- ``'avg180_COLLECTIBLE_lte': int``
- ``'avg180_COLLECTIBLE_gte': int``
- ``'avg180_COUNT_COLLECTIBLE_lte': int``
- ``'avg180_COUNT_COLLECTIBLE_gte': int``
- ``'avg180_COUNT_NEW_lte': int``
- ``'avg180_COUNT_NEW_gte': int``
- ``'avg180_COUNT_REFURBISHED_lte': int``
- ``'avg180_COUNT_REFURBISHED_gte': int``
- ``'avg180_COUNT_REVIEWS_lte': int``
- ``'avg180_COUNT_REVIEWS_gte': int``
- ``'avg180_COUNT_USED_lte': int``
- ``'avg180_COUNT_USED_gte': int``
- ``'avg180_EBAY_NEW_SHIPPING_lte': int``
- ``'avg180_EBAY_NEW_SHIPPING_gte': int``
- ``'avg180_EBAY_USED_SHIPPING_lte': int``
- ``'avg180_EBAY_USED_SHIPPING_gte': int``
- ``'avg180_LIGHTNING_DEAL_lte': int``
- ``'avg180_LIGHTNING_DEAL_gte': int``
- ``'avg180_LISTPRICE_lte': int``
- ``'avg180_LISTPRICE_gte': int``
- ``'avg180_NEW_lte': int``
- ``'avg180_NEW_gte': int``
- ``'avg180_NEW_FBA_lte': int``
- ``'avg180_NEW_FBA_gte': int``
- ``'avg180_NEW_FBM_SHIPPING_lte': int``
- ``'avg180_NEW_FBM_SHIPPING_gte': int``
- ``'avg180_RATING_lte': int``
- ``'avg180_RATING_gte': int``
- ``'avg180_REFURBISHED_lte': int``
- ``'avg180_REFURBISHED_gte': int``
- ``'avg180_REFURBISHED_SHIPPING_lte': int``
- ``'avg180_REFURBISHED_SHIPPING_gte': int``
- ``'avg180_RENT_lte': int``
- ``'avg180_RENT_gte': int``
- ``'avg180_SALES_lte': int``
- ``'avg180_SALES_gte': int``
- ``'avg180_TRADE_IN_lte': int``
- ``'avg180_TRADE_IN_gte': int``
- ``'avg180_USED_lte': int``
- ``'avg180_USED_gte': int``
- ``'avg180_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'avg180_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'avg180_USED_GOOD_SHIPPING_lte': int``
- ``'avg180_USED_GOOD_SHIPPING_gte': int``
- ``'avg180_USED_NEW_SHIPPING_lte': int``
- ``'avg180_USED_NEW_SHIPPING_gte': int``
- ``'avg180_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'avg180_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'avg180_WAREHOUSE_lte': int``
- ``'avg180_WAREHOUSE_gte': int``
- ``'avg1_AMAZON_lte': int``
- ``'avg1_AMAZON_gte': int``
- ``'avg1_BUY_BOX_SHIPPING_lte': int``
- ``'avg1_BUY_BOX_SHIPPING_gte': int``
- ``'avg1_COLLECTIBLE_lte': int``
- ``'avg1_COLLECTIBLE_gte': int``
- ``'avg1_COUNT_COLLECTIBLE_lte': int``
- ``'avg1_COUNT_COLLECTIBLE_gte': int``
- ``'avg1_COUNT_NEW_lte': int``
- ``'avg1_COUNT_NEW_gte': int``
- ``'avg1_COUNT_REFURBISHED_lte': int``
- ``'avg1_COUNT_REFURBISHED_gte': int``
- ``'avg1_COUNT_REVIEWS_lte': int``
- ``'avg1_COUNT_REVIEWS_gte': int``
- ``'avg1_COUNT_USED_lte': int``
- ``'avg1_COUNT_USED_gte': int``
- ``'avg1_EBAY_NEW_SHIPPING_lte': int``
- ``'avg1_EBAY_NEW_SHIPPING_gte': int``
- ``'avg1_EBAY_USED_SHIPPING_lte': int``
- ``'avg1_EBAY_USED_SHIPPING_gte': int``
- ``'avg1_LIGHTNING_DEAL_lte': int``
- ``'avg1_LIGHTNING_DEAL_gte': int``
- ``'avg1_LISTPRICE_lte': int``
- ``'avg1_LISTPRICE_gte': int``
- ``'avg1_NEW_lte': int``
- ``'avg1_NEW_gte': int``
- ``'avg1_NEW_FBA_lte': int``
- ``'avg1_NEW_FBA_gte': int``
- ``'avg1_NEW_FBM_SHIPPING_lte': int``
- ``'avg1_NEW_FBM_SHIPPING_gte': int``
- ``'avg1_RATING_lte': int``
- ``'avg1_RATING_gte': int``
- ``'avg1_REFURBISHED_lte': int``
- ``'avg1_REFURBISHED_gte': int``
- ``'avg1_REFURBISHED_SHIPPING_lte': int``
- ``'avg1_REFURBISHED_SHIPPING_gte': int``
- ``'avg1_RENT_lte': int``
- ``'avg1_RENT_gte': int``
- ``'avg1_SALES_lte': int``
- ``'avg1_SALES_gte': int``
- ``'avg1_TRADE_IN_lte': int``
- ``'avg1_TRADE_IN_gte': int``
- ``'avg1_USED_lte': int``
- ``'avg1_USED_gte': int``
- ``'avg1_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'avg1_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'avg1_USED_GOOD_SHIPPING_lte': int``
- ``'avg1_USED_GOOD_SHIPPING_gte': int``
- ``'avg1_USED_NEW_SHIPPING_lte': int``
- ``'avg1_USED_NEW_SHIPPING_gte': int``
- ``'avg1_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'avg1_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'avg1_WAREHOUSE_lte': int``
- ``'avg1_WAREHOUSE_gte': int``
- ``'avg30_AMAZON_lte': int``
- ``'avg30_AMAZON_gte': int``
- ``'avg30_BUY_BOX_SHIPPING_lte': int``
- ``'avg30_BUY_BOX_SHIPPING_gte': int``
- ``'avg30_COLLECTIBLE_lte': int``
- ``'avg30_COLLECTIBLE_gte': int``
- ``'avg30_COUNT_COLLECTIBLE_lte': int``
- ``'avg30_COUNT_COLLECTIBLE_gte': int``
- ``'avg30_COUNT_NEW_lte': int``
- ``'avg30_COUNT_NEW_gte': int``
- ``'avg30_COUNT_REFURBISHED_lte': int``
- ``'avg30_COUNT_REFURBISHED_gte': int``
- ``'avg30_COUNT_REVIEWS_lte': int``
- ``'avg30_COUNT_REVIEWS_gte': int``
- ``'avg30_COUNT_USED_lte': int``
- ``'avg30_COUNT_USED_gte': int``
- ``'avg30_EBAY_NEW_SHIPPING_lte': int``
- ``'avg30_EBAY_NEW_SHIPPING_gte': int``
- ``'avg30_EBAY_USED_SHIPPING_lte': int``
- ``'avg30_EBAY_USED_SHIPPING_gte': int``
- ``'avg30_LIGHTNING_DEAL_lte': int``
- ``'avg30_LIGHTNING_DEAL_gte': int``
- ``'avg30_LISTPRICE_lte': int``
- ``'avg30_LISTPRICE_gte': int``
- ``'avg30_NEW_lte': int``
- ``'avg30_NEW_gte': int``
- ``'avg30_NEW_FBA_lte': int``
- ``'avg30_NEW_FBA_gte': int``
- ``'avg30_NEW_FBM_SHIPPING_lte': int``
- ``'avg30_NEW_FBM_SHIPPING_gte': int``
- ``'avg30_RATING_lte': int``
- ``'avg30_RATING_gte': int``
- ``'avg30_REFURBISHED_lte': int``
- ``'avg30_REFURBISHED_gte': int``
- ``'avg30_REFURBISHED_SHIPPING_lte': int``
- ``'avg30_REFURBISHED_SHIPPING_gte': int``
- ``'avg30_RENT_lte': int``
- ``'avg30_RENT_gte': int``
- ``'avg30_SALES_lte': int``
- ``'avg30_SALES_gte': int``
- ``'avg30_TRADE_IN_lte': int``
- ``'avg30_TRADE_IN_gte': int``
- ``'avg30_USED_lte': int``
- ``'avg30_USED_gte': int``
- ``'avg30_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'avg30_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'avg30_USED_GOOD_SHIPPING_lte': int``
- ``'avg30_USED_GOOD_SHIPPING_gte': int``
- ``'avg30_USED_NEW_SHIPPING_lte': int``
- ``'avg30_USED_NEW_SHIPPING_gte': int``
- ``'avg30_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'avg30_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'avg30_WAREHOUSE_lte': int``
- ``'avg30_WAREHOUSE_gte': int``
- ``'avg7_AMAZON_lte': int``
- ``'avg7_AMAZON_gte': int``
- ``'avg7_BUY_BOX_SHIPPING_lte': int``
- ``'avg7_BUY_BOX_SHIPPING_gte': int``
- ``'avg7_COLLECTIBLE_lte': int``
- ``'avg7_COLLECTIBLE_gte': int``
- ``'avg7_COUNT_COLLECTIBLE_lte': int``
- ``'avg7_COUNT_COLLECTIBLE_gte': int``
- ``'avg7_COUNT_NEW_lte': int``
- ``'avg7_COUNT_NEW_gte': int``
- ``'avg7_COUNT_REFURBISHED_lte': int``
- ``'avg7_COUNT_REFURBISHED_gte': int``
- ``'avg7_COUNT_REVIEWS_lte': int``
- ``'avg7_COUNT_REVIEWS_gte': int``
- ``'avg7_COUNT_USED_lte': int``
- ``'avg7_COUNT_USED_gte': int``
- ``'avg7_EBAY_NEW_SHIPPING_lte': int``
- ``'avg7_EBAY_NEW_SHIPPING_gte': int``
- ``'avg7_EBAY_USED_SHIPPING_lte': int``
- ``'avg7_EBAY_USED_SHIPPING_gte': int``
- ``'avg7_LIGHTNING_DEAL_lte': int``
- ``'avg7_LIGHTNING_DEAL_gte': int``
- ``'avg7_LISTPRICE_lte': int``
- ``'avg7_LISTPRICE_gte': int``
- ``'avg7_NEW_lte': int``
- ``'avg7_NEW_gte': int``
- ``'avg7_NEW_FBA_lte': int``
- ``'avg7_NEW_FBA_gte': int``
- ``'avg7_NEW_FBM_SHIPPING_lte': int``
- ``'avg7_NEW_FBM_SHIPPING_gte': int``
- ``'avg7_RATING_lte': int``
- ``'avg7_RATING_gte': int``
- ``'avg7_REFURBISHED_lte': int``
- ``'avg7_REFURBISHED_gte': int``
- ``'avg7_REFURBISHED_SHIPPING_lte': int``
- ``'avg7_REFURBISHED_SHIPPING_gte': int``
- ``'avg7_RENT_lte': int``
- ``'avg7_RENT_gte': int``
- ``'avg7_SALES_lte': int``
- ``'avg7_SALES_gte': int``
- ``'avg7_TRADE_IN_lte': int``
- ``'avg7_TRADE_IN_gte': int``
- ``'avg7_USED_lte': int``
- ``'avg7_USED_gte': int``
- ``'avg7_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'avg7_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'avg7_USED_GOOD_SHIPPING_lte': int``
- ``'avg7_USED_GOOD_SHIPPING_gte': int``
- ``'avg7_USED_NEW_SHIPPING_lte': int``
- ``'avg7_USED_NEW_SHIPPING_gte': int``
- ``'avg7_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'avg7_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'avg7_WAREHOUSE_lte': int``
- ``'avg7_WAREHOUSE_gte': int``
- ``'avg90_AMAZON_lte': int``
- ``'avg90_AMAZON_gte': int``
- ``'avg90_BUY_BOX_SHIPPING_lte': int``
- ``'avg90_BUY_BOX_SHIPPING_gte': int``
- ``'avg90_COLLECTIBLE_lte': int``
- ``'avg90_COLLECTIBLE_gte': int``
- ``'avg90_COUNT_COLLECTIBLE_lte': int``
- ``'avg90_COUNT_COLLECTIBLE_gte': int``
- ``'avg90_COUNT_NEW_lte': int``
- ``'avg90_COUNT_NEW_gte': int``
- ``'avg90_COUNT_REFURBISHED_lte': int``
- ``'avg90_COUNT_REFURBISHED_gte': int``
- ``'avg90_COUNT_REVIEWS_lte': int``
- ``'avg90_COUNT_REVIEWS_gte': int``
- ``'avg90_COUNT_USED_lte': int``
- ``'avg90_COUNT_USED_gte': int``
- ``'avg90_EBAY_NEW_SHIPPING_lte': int``
- ``'avg90_EBAY_NEW_SHIPPING_gte': int``
- ``'avg90_EBAY_USED_SHIPPING_lte': int``
- ``'avg90_EBAY_USED_SHIPPING_gte': int``
- ``'avg90_LIGHTNING_DEAL_lte': int``
- ``'avg90_LIGHTNING_DEAL_gte': int``
- ``'avg90_LISTPRICE_lte': int``
- ``'avg90_LISTPRICE_gte': int``
- ``'avg90_NEW_lte': int``
- ``'avg90_NEW_gte': int``
- ``'avg90_NEW_FBA_lte': int``
- ``'avg90_NEW_FBA_gte': int``
- ``'avg90_NEW_FBM_SHIPPING_lte': int``
- ``'avg90_NEW_FBM_SHIPPING_gte': int``
- ``'avg90_RATING_lte': int``
- ``'avg90_RATING_gte': int``
- ``'avg90_REFURBISHED_lte': int``
- ``'avg90_REFURBISHED_gte': int``
- ``'avg90_REFURBISHED_SHIPPING_lte': int``
- ``'avg90_REFURBISHED_SHIPPING_gte': int``
- ``'avg90_RENT_lte': int``
- ``'avg90_RENT_gte': int``
- ``'avg90_SALES_lte': int``
- ``'avg90_SALES_gte': int``
- ``'avg90_TRADE_IN_lte': int``
- ``'avg90_TRADE_IN_gte': int``
- ``'avg90_USED_lte': int``
- ``'avg90_USED_gte': int``
- ``'avg90_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'avg90_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'avg90_USED_GOOD_SHIPPING_lte': int``
- ``'avg90_USED_GOOD_SHIPPING_gte': int``
- ``'avg90_USED_NEW_SHIPPING_lte': int``
- ``'avg90_USED_NEW_SHIPPING_gte': int``
- ``'avg90_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'avg90_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'avg90_WAREHOUSE_lte': int``
- ``'avg90_WAREHOUSE_gte': int``
- ``'backInStock_AMAZON': bool``
- ``'backInStock_BUY_BOX_SHIPPING': bool``
- ``'backInStock_COLLECTIBLE': bool``
- ``'backInStock_COUNT_COLLECTIBLE': bool``
- ``'backInStock_COUNT_NEW': bool``
- ``'backInStock_COUNT_REFURBISHED': bool``
- ``'backInStock_COUNT_REVIEWS': bool``
- ``'backInStock_COUNT_USED': bool``
- ``'backInStock_EBAY_NEW_SHIPPING': bool``
- ``'backInStock_EBAY_USED_SHIPPING': bool``
- ``'backInStock_LIGHTNING_DEAL': bool``
- ``'backInStock_LISTPRICE': bool``
- ``'backInStock_NEW': bool``
- ``'backInStock_NEW_FBA': bool``
- ``'backInStock_NEW_FBM_SHIPPING': bool``
- ``'backInStock_RATING': bool``
- ``'backInStock_REFURBISHED': bool``
- ``'backInStock_REFURBISHED_SHIPPING': bool``
- ``'backInStock_RENT': bool``
- ``'backInStock_SALES': bool``
- ``'backInStock_TRADE_IN': bool``
- ``'backInStock_USED': bool``
- ``'backInStock_USED_ACCEPTABLE_SHIPPING': bool``
- ``'backInStock_USED_GOOD_SHIPPING': bool``
- ``'backInStock_USED_NEW_SHIPPING': bool``
- ``'backInStock_USED_VERY_GOOD_SHIPPING': bool``
- ``'backInStock_WAREHOUSE': bool``
- ``'binding': str``
- ``'brand': str``
- ``'buyBoxSellerId': str``
- ``'color': str``
- ``'couponOneTimeAbsolute_lte': int``
- ``'couponOneTimeAbsolute_gte': int``
- ``'couponOneTimePercent_lte': int``
- ``'couponOneTimePercent_gte': int``
- ``'couponSNSAbsolute_lte': int``
- ``'couponSNSAbsolute_gte': int``
- ``'couponSNSPercent_lte': int``
- ``'couponSNSPercent_gte': int``
- ``'current_AMAZON_lte': int``
- ``'current_AMAZON_gte': int``
- ``'current_BUY_BOX_SHIPPING_lte': int``
- ``'current_BUY_BOX_SHIPPING_gte': int``
- ``'current_COLLECTIBLE_lte': int``
- ``'current_COLLECTIBLE_gte': int``
- ``'current_COUNT_COLLECTIBLE_lte': int``
- ``'current_COUNT_COLLECTIBLE_gte': int``
- ``'current_COUNT_NEW_lte': int``
- ``'current_COUNT_NEW_gte': int``
- ``'current_COUNT_REFURBISHED_lte': int``
- ``'current_COUNT_REFURBISHED_gte': int``
- ``'current_COUNT_REVIEWS_lte': int``
- ``'current_COUNT_REVIEWS_gte': int``
- ``'current_COUNT_USED_lte': int``
- ``'current_COUNT_USED_gte': int``
- ``'current_EBAY_NEW_SHIPPING_lte': int``
- ``'current_EBAY_NEW_SHIPPING_gte': int``
- ``'current_EBAY_USED_SHIPPING_lte': int``
- ``'current_EBAY_USED_SHIPPING_gte': int``
- ``'current_LIGHTNING_DEAL_lte': int``
- ``'current_LIGHTNING_DEAL_gte': int``
- ``'current_LISTPRICE_lte': int``
- ``'current_LISTPRICE_gte': int``
- ``'current_NEW_lte': int``
- ``'current_NEW_gte': int``
- ``'current_NEW_FBA_lte': int``
- ``'current_NEW_FBA_gte': int``
- ``'current_NEW_FBM_SHIPPING_lte': int``
- ``'current_NEW_FBM_SHIPPING_gte': int``
- ``'current_RATING_lte': int``
- ``'current_RATING_gte': int``
- ``'current_REFURBISHED_lte': int``
- ``'current_REFURBISHED_gte': int``
- ``'current_REFURBISHED_SHIPPING_lte': int``
- ``'current_REFURBISHED_SHIPPING_gte': int``
- ``'current_RENT_lte': int``
- ``'current_RENT_gte': int``
- ``'current_SALES_lte': int``
- ``'current_SALES_gte': int``
- ``'current_TRADE_IN_lte': int``
- ``'current_TRADE_IN_gte': int``
- ``'current_USED_lte': int``
- ``'current_USED_gte': int``
- ``'current_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'current_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'current_USED_GOOD_SHIPPING_lte': int``
- ``'current_USED_GOOD_SHIPPING_gte': int``
- ``'current_USED_NEW_SHIPPING_lte': int``
- ``'current_USED_NEW_SHIPPING_gte': int``
- ``'current_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'current_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'current_WAREHOUSE_lte': int``
- ``'current_WAREHOUSE_gte': int``
- ``'delta1_AMAZON_lte': int``
- ``'delta1_AMAZON_gte': int``
- ``'delta1_BUY_BOX_SHIPPING_lte': int``
- ``'delta1_BUY_BOX_SHIPPING_gte': int``
- ``'delta1_COLLECTIBLE_lte': int``
- ``'delta1_COLLECTIBLE_gte': int``
- ``'delta1_COUNT_COLLECTIBLE_lte': int``
- ``'delta1_COUNT_COLLECTIBLE_gte': int``
- ``'delta1_COUNT_NEW_lte': int``
- ``'delta1_COUNT_NEW_gte': int``
- ``'delta1_COUNT_REFURBISHED_lte': int``
- ``'delta1_COUNT_REFURBISHED_gte': int``
- ``'delta1_COUNT_REVIEWS_lte': int``
- ``'delta1_COUNT_REVIEWS_gte': int``
- ``'delta1_COUNT_USED_lte': int``
- ``'delta1_COUNT_USED_gte': int``
- ``'delta1_EBAY_NEW_SHIPPING_lte': int``
- ``'delta1_EBAY_NEW_SHIPPING_gte': int``
- ``'delta1_EBAY_USED_SHIPPING_lte': int``
- ``'delta1_EBAY_USED_SHIPPING_gte': int``
- ``'delta1_LIGHTNING_DEAL_lte': int``
- ``'delta1_LIGHTNING_DEAL_gte': int``
- ``'delta1_LISTPRICE_lte': int``
- ``'delta1_LISTPRICE_gte': int``
- ``'delta1_NEW_lte': int``
- ``'delta1_NEW_gte': int``
- ``'delta1_NEW_FBA_lte': int``
- ``'delta1_NEW_FBA_gte': int``
- ``'delta1_NEW_FBM_SHIPPING_lte': int``
- ``'delta1_NEW_FBM_SHIPPING_gte': int``
- ``'delta1_RATING_lte': int``
- ``'delta1_RATING_gte': int``
- ``'delta1_REFURBISHED_lte': int``
- ``'delta1_REFURBISHED_gte': int``
- ``'delta1_REFURBISHED_SHIPPING_lte': int``
- ``'delta1_REFURBISHED_SHIPPING_gte': int``
- ``'delta1_RENT_lte': int``
- ``'delta1_RENT_gte': int``
- ``'delta1_SALES_lte': int``
- ``'delta1_SALES_gte': int``
- ``'delta1_TRADE_IN_lte': int``
- ``'delta1_TRADE_IN_gte': int``
- ``'delta1_USED_lte': int``
- ``'delta1_USED_gte': int``
- ``'delta1_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'delta1_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'delta1_USED_GOOD_SHIPPING_lte': int``
- ``'delta1_USED_GOOD_SHIPPING_gte': int``
- ``'delta1_USED_NEW_SHIPPING_lte': int``
- ``'delta1_USED_NEW_SHIPPING_gte': int``
- ``'delta1_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'delta1_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'delta1_WAREHOUSE_lte': int``
- ``'delta1_WAREHOUSE_gte': int``
- ``'delta30_AMAZON_lte': int``
- ``'delta30_AMAZON_gte': int``
- ``'delta30_BUY_BOX_SHIPPING_lte': int``
- ``'delta30_BUY_BOX_SHIPPING_gte': int``
- ``'delta30_COLLECTIBLE_lte': int``
- ``'delta30_COLLECTIBLE_gte': int``
- ``'delta30_COUNT_COLLECTIBLE_lte': int``
- ``'delta30_COUNT_COLLECTIBLE_gte': int``
- ``'delta30_COUNT_NEW_lte': int``
- ``'delta30_COUNT_NEW_gte': int``
- ``'delta30_COUNT_REFURBISHED_lte': int``
- ``'delta30_COUNT_REFURBISHED_gte': int``
- ``'delta30_COUNT_REVIEWS_lte': int``
- ``'delta30_COUNT_REVIEWS_gte': int``
- ``'delta30_COUNT_USED_lte': int``
- ``'delta30_COUNT_USED_gte': int``
- ``'delta30_EBAY_NEW_SHIPPING_lte': int``
- ``'delta30_EBAY_NEW_SHIPPING_gte': int``
- ``'delta30_EBAY_USED_SHIPPING_lte': int``
- ``'delta30_EBAY_USED_SHIPPING_gte': int``
- ``'delta30_LIGHTNING_DEAL_lte': int``
- ``'delta30_LIGHTNING_DEAL_gte': int``
- ``'delta30_LISTPRICE_lte': int``
- ``'delta30_LISTPRICE_gte': int``
- ``'delta30_NEW_lte': int``
- ``'delta30_NEW_gte': int``
- ``'delta30_NEW_FBA_lte': int``
- ``'delta30_NEW_FBA_gte': int``
- ``'delta30_NEW_FBM_SHIPPING_lte': int``
- ``'delta30_NEW_FBM_SHIPPING_gte': int``
- ``'delta30_RATING_lte': int``
- ``'delta30_RATING_gte': int``
- ``'delta30_REFURBISHED_lte': int``
- ``'delta30_REFURBISHED_gte': int``
- ``'delta30_REFURBISHED_SHIPPING_lte': int``
- ``'delta30_REFURBISHED_SHIPPING_gte': int``
- ``'delta30_RENT_lte': int``
- ``'delta30_RENT_gte': int``
- ``'delta30_SALES_lte': int``
- ``'delta30_SALES_gte': int``
- ``'delta30_TRADE_IN_lte': int``
- ``'delta30_TRADE_IN_gte': int``
- ``'delta30_USED_lte': int``
- ``'delta30_USED_gte': int``
- ``'delta30_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'delta30_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'delta30_USED_GOOD_SHIPPING_lte': int``
- ``'delta30_USED_GOOD_SHIPPING_gte': int``
- ``'delta30_USED_NEW_SHIPPING_lte': int``
- ``'delta30_USED_NEW_SHIPPING_gte': int``
- ``'delta30_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'delta30_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'delta30_WAREHOUSE_lte': int``
- ``'delta30_WAREHOUSE_gte': int``
- ``'delta7_AMAZON_lte': int``
- ``'delta7_AMAZON_gte': int``
- ``'delta7_BUY_BOX_SHIPPING_lte': int``
- ``'delta7_BUY_BOX_SHIPPING_gte': int``
- ``'delta7_COLLECTIBLE_lte': int``
- ``'delta7_COLLECTIBLE_gte': int``
- ``'delta7_COUNT_COLLECTIBLE_lte': int``
- ``'delta7_COUNT_COLLECTIBLE_gte': int``
- ``'delta7_COUNT_NEW_lte': int``
- ``'delta7_COUNT_NEW_gte': int``
- ``'delta7_COUNT_REFURBISHED_lte': int``
- ``'delta7_COUNT_REFURBISHED_gte': int``
- ``'delta7_COUNT_REVIEWS_lte': int``
- ``'delta7_COUNT_REVIEWS_gte': int``
- ``'delta7_COUNT_USED_lte': int``
- ``'delta7_COUNT_USED_gte': int``
- ``'delta7_EBAY_NEW_SHIPPING_lte': int``
- ``'delta7_EBAY_NEW_SHIPPING_gte': int``
- ``'delta7_EBAY_USED_SHIPPING_lte': int``
- ``'delta7_EBAY_USED_SHIPPING_gte': int``
- ``'delta7_LIGHTNING_DEAL_lte': int``
- ``'delta7_LIGHTNING_DEAL_gte': int``
- ``'delta7_LISTPRICE_lte': int``
- ``'delta7_LISTPRICE_gte': int``
- ``'delta7_NEW_lte': int``
- ``'delta7_NEW_gte': int``
- ``'delta7_NEW_FBA_lte': int``
- ``'delta7_NEW_FBA_gte': int``
- ``'delta7_NEW_FBM_SHIPPING_lte': int``
- ``'delta7_NEW_FBM_SHIPPING_gte': int``
- ``'delta7_RATING_lte': int``
- ``'delta7_RATING_gte': int``
- ``'delta7_REFURBISHED_lte': int``
- ``'delta7_REFURBISHED_gte': int``
- ``'delta7_REFURBISHED_SHIPPING_lte': int``
- ``'delta7_REFURBISHED_SHIPPING_gte': int``
- ``'delta7_RENT_lte': int``
- ``'delta7_RENT_gte': int``
- ``'delta7_SALES_lte': int``
- ``'delta7_SALES_gte': int``
- ``'delta7_TRADE_IN_lte': int``
- ``'delta7_TRADE_IN_gte': int``
- ``'delta7_USED_lte': int``
- ``'delta7_USED_gte': int``
- ``'delta7_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'delta7_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'delta7_USED_GOOD_SHIPPING_lte': int``
- ``'delta7_USED_GOOD_SHIPPING_gte': int``
- ``'delta7_USED_NEW_SHIPPING_lte': int``
- ``'delta7_USED_NEW_SHIPPING_gte': int``
- ``'delta7_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'delta7_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'delta7_WAREHOUSE_lte': int``
- ``'delta7_WAREHOUSE_gte': int``
- ``'delta90_AMAZON_lte': int``
- ``'delta90_AMAZON_gte': int``
- ``'delta90_BUY_BOX_SHIPPING_lte': int``
- ``'delta90_BUY_BOX_SHIPPING_gte': int``
- ``'delta90_COLLECTIBLE_lte': int``
- ``'delta90_COLLECTIBLE_gte': int``
- ``'delta90_COUNT_COLLECTIBLE_lte': int``
- ``'delta90_COUNT_COLLECTIBLE_gte': int``
- ``'delta90_COUNT_NEW_lte': int``
- ``'delta90_COUNT_NEW_gte': int``
- ``'delta90_COUNT_REFURBISHED_lte': int``
- ``'delta90_COUNT_REFURBISHED_gte': int``
- ``'delta90_COUNT_REVIEWS_lte': int``
- ``'delta90_COUNT_REVIEWS_gte': int``
- ``'delta90_COUNT_USED_lte': int``
- ``'delta90_COUNT_USED_gte': int``
- ``'delta90_EBAY_NEW_SHIPPING_lte': int``
- ``'delta90_EBAY_NEW_SHIPPING_gte': int``
- ``'delta90_EBAY_USED_SHIPPING_lte': int``
- ``'delta90_EBAY_USED_SHIPPING_gte': int``
- ``'delta90_LIGHTNING_DEAL_lte': int``
- ``'delta90_LIGHTNING_DEAL_gte': int``
- ``'delta90_LISTPRICE_lte': int``
- ``'delta90_LISTPRICE_gte': int``
- ``'delta90_NEW_lte': int``
- ``'delta90_NEW_gte': int``
- ``'delta90_NEW_FBA_lte': int``
- ``'delta90_NEW_FBA_gte': int``
- ``'delta90_NEW_FBM_SHIPPING_lte': int``
- ``'delta90_NEW_FBM_SHIPPING_gte': int``
- ``'delta90_RATING_lte': int``
- ``'delta90_RATING_gte': int``
- ``'delta90_REFURBISHED_lte': int``
- ``'delta90_REFURBISHED_gte': int``
- ``'delta90_REFURBISHED_SHIPPING_lte': int``
- ``'delta90_REFURBISHED_SHIPPING_gte': int``
- ``'delta90_RENT_lte': int``
- ``'delta90_RENT_gte': int``
- ``'delta90_SALES_lte': int``
- ``'delta90_SALES_gte': int``
- ``'delta90_TRADE_IN_lte': int``
- ``'delta90_TRADE_IN_gte': int``
- ``'delta90_USED_lte': int``
- ``'delta90_USED_gte': int``
- ``'delta90_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'delta90_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'delta90_USED_GOOD_SHIPPING_lte': int``
- ``'delta90_USED_GOOD_SHIPPING_gte': int``
- ``'delta90_USED_NEW_SHIPPING_lte': int``
- ``'delta90_USED_NEW_SHIPPING_gte': int``
- ``'delta90_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'delta90_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'delta90_WAREHOUSE_lte': int``
- ``'delta90_WAREHOUSE_gte': int``
- ``'deltaLast_AMAZON_lte': int``
- ``'deltaLast_AMAZON_gte': int``
- ``'deltaLast_BUY_BOX_SHIPPING_lte': int``
- ``'deltaLast_BUY_BOX_SHIPPING_gte': int``
- ``'deltaLast_COLLECTIBLE_lte': int``
- ``'deltaLast_COLLECTIBLE_gte': int``
- ``'deltaLast_COUNT_COLLECTIBLE_lte': int``
- ``'deltaLast_COUNT_COLLECTIBLE_gte': int``
- ``'deltaLast_COUNT_NEW_lte': int``
- ``'deltaLast_COUNT_NEW_gte': int``
- ``'deltaLast_COUNT_REFURBISHED_lte': int``
- ``'deltaLast_COUNT_REFURBISHED_gte': int``
- ``'deltaLast_COUNT_REVIEWS_lte': int``
- ``'deltaLast_COUNT_REVIEWS_gte': int``
- ``'deltaLast_COUNT_USED_lte': int``
- ``'deltaLast_COUNT_USED_gte': int``
- ``'deltaLast_EBAY_NEW_SHIPPING_lte': int``
- ``'deltaLast_EBAY_NEW_SHIPPING_gte': int``
- ``'deltaLast_EBAY_USED_SHIPPING_lte': int``
- ``'deltaLast_EBAY_USED_SHIPPING_gte': int``
- ``'deltaLast_LIGHTNING_DEAL_lte': int``
- ``'deltaLast_LIGHTNING_DEAL_gte': int``
- ``'deltaLast_LISTPRICE_lte': int``
- ``'deltaLast_LISTPRICE_gte': int``
- ``'deltaLast_NEW_lte': int``
- ``'deltaLast_NEW_gte': int``
- ``'deltaLast_NEW_FBA_lte': int``
- ``'deltaLast_NEW_FBA_gte': int``
- ``'deltaLast_NEW_FBM_SHIPPING_lte': int``
- ``'deltaLast_NEW_FBM_SHIPPING_gte': int``
- ``'deltaLast_RATING_lte': int``
- ``'deltaLast_RATING_gte': int``
- ``'deltaLast_REFURBISHED_lte': int``
- ``'deltaLast_REFURBISHED_gte': int``
- ``'deltaLast_REFURBISHED_SHIPPING_lte': int``
- ``'deltaLast_REFURBISHED_SHIPPING_gte': int``
- ``'deltaLast_RENT_lte': int``
- ``'deltaLast_RENT_gte': int``
- ``'deltaLast_SALES_lte': int``
- ``'deltaLast_SALES_gte': int``
- ``'deltaLast_TRADE_IN_lte': int``
- ``'deltaLast_TRADE_IN_gte': int``
- ``'deltaLast_USED_lte': int``
- ``'deltaLast_USED_gte': int``
- ``'deltaLast_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'deltaLast_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'deltaLast_USED_GOOD_SHIPPING_lte': int``
- ``'deltaLast_USED_GOOD_SHIPPING_gte': int``
- ``'deltaLast_USED_NEW_SHIPPING_lte': int``
- ``'deltaLast_USED_NEW_SHIPPING_gte': int``
- ``'deltaLast_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'deltaLast_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'deltaLast_WAREHOUSE_lte': int``
- ``'deltaLast_WAREHOUSE_gte': int``
- ``'deltaPercent1_AMAZON_lte': int``
- ``'deltaPercent1_AMAZON_gte': int``
- ``'deltaPercent1_BUY_BOX_SHIPPING_lte': int``
- ``'deltaPercent1_BUY_BOX_SHIPPING_gte': int``
- ``'deltaPercent1_COLLECTIBLE_lte': int``
- ``'deltaPercent1_COLLECTIBLE_gte': int``
- ``'deltaPercent1_COUNT_COLLECTIBLE_lte': int``
- ``'deltaPercent1_COUNT_COLLECTIBLE_gte': int``
- ``'deltaPercent1_COUNT_NEW_lte': int``
- ``'deltaPercent1_COUNT_NEW_gte': int``
- ``'deltaPercent1_COUNT_REFURBISHED_lte': int``
- ``'deltaPercent1_COUNT_REFURBISHED_gte': int``
- ``'deltaPercent1_COUNT_REVIEWS_lte': int``
- ``'deltaPercent1_COUNT_REVIEWS_gte': int``
- ``'deltaPercent1_COUNT_USED_lte': int``
- ``'deltaPercent1_COUNT_USED_gte': int``
- ``'deltaPercent1_EBAY_NEW_SHIPPING_lte': int``
- ``'deltaPercent1_EBAY_NEW_SHIPPING_gte': int``
- ``'deltaPercent1_EBAY_USED_SHIPPING_lte': int``
- ``'deltaPercent1_EBAY_USED_SHIPPING_gte': int``
- ``'deltaPercent1_LIGHTNING_DEAL_lte': int``
- ``'deltaPercent1_LIGHTNING_DEAL_gte': int``
- ``'deltaPercent1_LISTPRICE_lte': int``
- ``'deltaPercent1_LISTPRICE_gte': int``
- ``'deltaPercent1_NEW_lte': int``
- ``'deltaPercent1_NEW_gte': int``
- ``'deltaPercent1_NEW_FBA_lte': int``
- ``'deltaPercent1_NEW_FBA_gte': int``
- ``'deltaPercent1_NEW_FBM_SHIPPING_lte': int``
- ``'deltaPercent1_NEW_FBM_SHIPPING_gte': int``
- ``'deltaPercent1_RATING_lte': int``
- ``'deltaPercent1_RATING_gte': int``
- ``'deltaPercent1_REFURBISHED_lte': int``
- ``'deltaPercent1_REFURBISHED_gte': int``
- ``'deltaPercent1_REFURBISHED_SHIPPING_lte': int``
- ``'deltaPercent1_REFURBISHED_SHIPPING_gte': int``
- ``'deltaPercent1_RENT_lte': int``
- ``'deltaPercent1_RENT_gte': int``
- ``'deltaPercent1_SALES_lte': int``
- ``'deltaPercent1_SALES_gte': int``
- ``'deltaPercent1_TRADE_IN_lte': int``
- ``'deltaPercent1_TRADE_IN_gte': int``
- ``'deltaPercent1_USED_lte': int``
- ``'deltaPercent1_USED_gte': int``
- ``'deltaPercent1_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'deltaPercent1_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'deltaPercent1_USED_GOOD_SHIPPING_lte': int``
- ``'deltaPercent1_USED_GOOD_SHIPPING_gte': int``
- ``'deltaPercent1_USED_NEW_SHIPPING_lte': int``
- ``'deltaPercent1_USED_NEW_SHIPPING_gte': int``
- ``'deltaPercent1_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'deltaPercent1_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'deltaPercent1_WAREHOUSE_lte': int``
- ``'deltaPercent1_WAREHOUSE_gte': int``
- ``'deltaPercent30_AMAZON_lte': int``
- ``'deltaPercent30_AMAZON_gte': int``
- ``'deltaPercent30_BUY_BOX_SHIPPING_lte': int``
- ``'deltaPercent30_BUY_BOX_SHIPPING_gte': int``
- ``'deltaPercent30_COLLECTIBLE_lte': int``
- ``'deltaPercent30_COLLECTIBLE_gte': int``
- ``'deltaPercent30_COUNT_COLLECTIBLE_lte': int``
- ``'deltaPercent30_COUNT_COLLECTIBLE_gte': int``
- ``'deltaPercent30_COUNT_NEW_lte': int``
- ``'deltaPercent30_COUNT_NEW_gte': int``
- ``'deltaPercent30_COUNT_REFURBISHED_lte': int``
- ``'deltaPercent30_COUNT_REFURBISHED_gte': int``
- ``'deltaPercent30_COUNT_REVIEWS_lte': int``
- ``'deltaPercent30_COUNT_REVIEWS_gte': int``
- ``'deltaPercent30_COUNT_USED_lte': int``
- ``'deltaPercent30_COUNT_USED_gte': int``
- ``'deltaPercent30_EBAY_NEW_SHIPPING_lte': int``
- ``'deltaPercent30_EBAY_NEW_SHIPPING_gte': int``
- ``'deltaPercent30_EBAY_USED_SHIPPING_lte': int``
- ``'deltaPercent30_EBAY_USED_SHIPPING_gte': int``
- ``'deltaPercent30_LIGHTNING_DEAL_lte': int``
- ``'deltaPercent30_LIGHTNING_DEAL_gte': int``
- ``'deltaPercent30_LISTPRICE_lte': int``
- ``'deltaPercent30_LISTPRICE_gte': int``
- ``'deltaPercent30_NEW_lte': int``
- ``'deltaPercent30_NEW_gte': int``
- ``'deltaPercent30_NEW_FBA_lte': int``
- ``'deltaPercent30_NEW_FBA_gte': int``
- ``'deltaPercent30_NEW_FBM_SHIPPING_lte': int``
- ``'deltaPercent30_NEW_FBM_SHIPPING_gte': int``
- ``'deltaPercent30_RATING_lte': int``
- ``'deltaPercent30_RATING_gte': int``
- ``'deltaPercent30_REFURBISHED_lte': int``
- ``'deltaPercent30_REFURBISHED_gte': int``
- ``'deltaPercent30_REFURBISHED_SHIPPING_lte': int``
- ``'deltaPercent30_REFURBISHED_SHIPPING_gte': int``
- ``'deltaPercent30_RENT_lte': int``
- ``'deltaPercent30_RENT_gte': int``
- ``'deltaPercent30_SALES_lte': int``
- ``'deltaPercent30_SALES_gte': int``
- ``'deltaPercent30_TRADE_IN_lte': int``
- ``'deltaPercent30_TRADE_IN_gte': int``
- ``'deltaPercent30_USED_lte': int``
- ``'deltaPercent30_USED_gte': int``
- ``'deltaPercent30_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'deltaPercent30_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'deltaPercent30_USED_GOOD_SHIPPING_lte': int``
- ``'deltaPercent30_USED_GOOD_SHIPPING_gte': int``
- ``'deltaPercent30_USED_NEW_SHIPPING_lte': int``
- ``'deltaPercent30_USED_NEW_SHIPPING_gte': int``
- ``'deltaPercent30_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'deltaPercent30_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'deltaPercent30_WAREHOUSE_lte': int``
- ``'deltaPercent30_WAREHOUSE_gte': int``
- ``'deltaPercent7_AMAZON_lte': int``
- ``'deltaPercent7_AMAZON_gte': int``
- ``'deltaPercent7_BUY_BOX_SHIPPING_lte': int``
- ``'deltaPercent7_BUY_BOX_SHIPPING_gte': int``
- ``'deltaPercent7_COLLECTIBLE_lte': int``
- ``'deltaPercent7_COLLECTIBLE_gte': int``
- ``'deltaPercent7_COUNT_COLLECTIBLE_lte': int``
- ``'deltaPercent7_COUNT_COLLECTIBLE_gte': int``
- ``'deltaPercent7_COUNT_NEW_lte': int``
- ``'deltaPercent7_COUNT_NEW_gte': int``
- ``'deltaPercent7_COUNT_REFURBISHED_lte': int``
- ``'deltaPercent7_COUNT_REFURBISHED_gte': int``
- ``'deltaPercent7_COUNT_REVIEWS_lte': int``
- ``'deltaPercent7_COUNT_REVIEWS_gte': int``
- ``'deltaPercent7_COUNT_USED_lte': int``
- ``'deltaPercent7_COUNT_USED_gte': int``
- ``'deltaPercent7_EBAY_NEW_SHIPPING_lte': int``
- ``'deltaPercent7_EBAY_NEW_SHIPPING_gte': int``
- ``'deltaPercent7_EBAY_USED_SHIPPING_lte': int``
- ``'deltaPercent7_EBAY_USED_SHIPPING_gte': int``
- ``'deltaPercent7_LIGHTNING_DEAL_lte': int``
- ``'deltaPercent7_LIGHTNING_DEAL_gte': int``
- ``'deltaPercent7_LISTPRICE_lte': int``
- ``'deltaPercent7_LISTPRICE_gte': int``
- ``'deltaPercent7_NEW_lte': int``
- ``'deltaPercent7_NEW_gte': int``
- ``'deltaPercent7_NEW_FBA_lte': int``
- ``'deltaPercent7_NEW_FBA_gte': int``
- ``'deltaPercent7_NEW_FBM_SHIPPING_lte': int``
- ``'deltaPercent7_NEW_FBM_SHIPPING_gte': int``
- ``'deltaPercent7_RATING_lte': int``
- ``'deltaPercent7_RATING_gte': int``
- ``'deltaPercent7_REFURBISHED_lte': int``
- ``'deltaPercent7_REFURBISHED_gte': int``
- ``'deltaPercent7_REFURBISHED_SHIPPING_lte': int``
- ``'deltaPercent7_REFURBISHED_SHIPPING_gte': int``
- ``'deltaPercent7_RENT_lte': int``
- ``'deltaPercent7_RENT_gte': int``
- ``'deltaPercent7_SALES_lte': int``
- ``'deltaPercent7_SALES_gte': int``
- ``'deltaPercent7_TRADE_IN_lte': int``
- ``'deltaPercent7_TRADE_IN_gte': int``
- ``'deltaPercent7_USED_lte': int``
- ``'deltaPercent7_USED_gte': int``
- ``'deltaPercent7_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'deltaPercent7_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'deltaPercent7_USED_GOOD_SHIPPING_lte': int``
- ``'deltaPercent7_USED_GOOD_SHIPPING_gte': int``
- ``'deltaPercent7_USED_NEW_SHIPPING_lte': int``
- ``'deltaPercent7_USED_NEW_SHIPPING_gte': int``
- ``'deltaPercent7_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'deltaPercent7_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'deltaPercent7_WAREHOUSE_lte': int``
- ``'deltaPercent7_WAREHOUSE_gte': int``
- ``'deltaPercent90_AMAZON_lte': int``
- ``'deltaPercent90_AMAZON_gte': int``
- ``'deltaPercent90_BUY_BOX_SHIPPING_lte': int``
- ``'deltaPercent90_BUY_BOX_SHIPPING_gte': int``
- ``'deltaPercent90_COLLECTIBLE_lte': int``
- ``'deltaPercent90_COLLECTIBLE_gte': int``
- ``'deltaPercent90_COUNT_COLLECTIBLE_lte': int``
- ``'deltaPercent90_COUNT_COLLECTIBLE_gte': int``
- ``'deltaPercent90_COUNT_NEW_lte': int``
- ``'deltaPercent90_COUNT_NEW_gte': int``
- ``'deltaPercent90_COUNT_REFURBISHED_lte': int``
- ``'deltaPercent90_COUNT_REFURBISHED_gte': int``
- ``'deltaPercent90_COUNT_REVIEWS_lte': int``
- ``'deltaPercent90_COUNT_REVIEWS_gte': int``
- ``'deltaPercent90_COUNT_USED_lte': int``
- ``'deltaPercent90_COUNT_USED_gte': int``
- ``'deltaPercent90_EBAY_NEW_SHIPPING_lte': int``
- ``'deltaPercent90_EBAY_NEW_SHIPPING_gte': int``
- ``'deltaPercent90_EBAY_USED_SHIPPING_lte': int``
- ``'deltaPercent90_EBAY_USED_SHIPPING_gte': int``
- ``'deltaPercent90_LIGHTNING_DEAL_lte': int``
- ``'deltaPercent90_LIGHTNING_DEAL_gte': int``
- ``'deltaPercent90_LISTPRICE_lte': int``
- ``'deltaPercent90_LISTPRICE_gte': int``
- ``'deltaPercent90_NEW_lte': int``
- ``'deltaPercent90_NEW_gte': int``
- ``'deltaPercent90_NEW_FBA_lte': int``
- ``'deltaPercent90_NEW_FBA_gte': int``
- ``'deltaPercent90_NEW_FBM_SHIPPING_lte': int``
- ``'deltaPercent90_NEW_FBM_SHIPPING_gte': int``
- ``'deltaPercent90_RATING_lte': int``
- ``'deltaPercent90_RATING_gte': int``
- ``'deltaPercent90_REFURBISHED_lte': int``
- ``'deltaPercent90_REFURBISHED_gte': int``
- ``'deltaPercent90_REFURBISHED_SHIPPING_lte': int``
- ``'deltaPercent90_REFURBISHED_SHIPPING_gte': int``
- ``'deltaPercent90_RENT_lte': int``
- ``'deltaPercent90_RENT_gte': int``
- ``'deltaPercent90_SALES_lte': int``
- ``'deltaPercent90_SALES_gte': int``
- ``'deltaPercent90_TRADE_IN_lte': int``
- ``'deltaPercent90_TRADE_IN_gte': int``
- ``'deltaPercent90_USED_lte': int``
- ``'deltaPercent90_USED_gte': int``
- ``'deltaPercent90_USED_ACCEPTABLE_SHIPPING_lte': int``
- ``'deltaPercent90_USED_ACCEPTABLE_SHIPPING_gte': int``
- ``'deltaPercent90_USED_GOOD_SHIPPING_lte': int``
- ``'deltaPercent90_USED_GOOD_SHIPPING_gte': int``
- ``'deltaPercent90_USED_NEW_SHIPPING_lte': int``
- ``'deltaPercent90_USED_NEW_SHIPPING_gte': int``
- ``'deltaPercent90_USED_VERY_GOOD_SHIPPING_lte': int``
- ``'deltaPercent90_USED_VERY_GOOD_SHIPPING_gte': int``
- ``'deltaPercent90_WAREHOUSE_lte': int``
- ``'deltaPercent90_WAREHOUSE_gte': int``
- ``'department': str``
- ``'edition': str``
- ``'fbaFees_lte': int``
- ``'fbaFees_gte': int``
- ``'format': str``
- ``'genre': str``
- ``'hasParentASIN': bool``
- ``'hasReviews': bool``
- ``'hazardousMaterialType_lte': int``
- ``'hazardousMaterialType_gte': int``
- ``'isAdultProduct': bool``
- ``'isEligibleForSuperSaverShipping': bool``
- ``'isEligibleForTradeIn': bool``
- ``'isHighestOffer': bool``
- ``'isHighest_AMAZON': bool``
- ``'isHighest_BUY_BOX_SHIPPING': bool``
- ``'isHighest_COLLECTIBLE': bool``
- ``'isHighest_COUNT_COLLECTIBLE': bool``
- ``'isHighest_COUNT_NEW': bool``
- ``'isHighest_COUNT_REFURBISHED': bool``
- ``'isHighest_COUNT_REVIEWS': bool``
- ``'isHighest_COUNT_USED': bool``
- ``'isHighest_EBAY_NEW_SHIPPING': bool``
- ``'isHighest_EBAY_USED_SHIPPING': bool``
- ``'isHighest_LIGHTNING_DEAL': bool``
- ``'isHighest_LISTPRICE': bool``
- ``'isHighest_NEW': bool``
- ``'isHighest_NEW_FBA': bool``
- ``'isHighest_NEW_FBM_SHIPPING': bool``
- ``'isHighest_RATING': bool``
- ``'isHighest_REFURBISHED': bool``
- ``'isHighest_REFURBISHED_SHIPPING': bool``
- ``'isHighest_RENT': bool``
- ``'isHighest_SALES': bool``
- ``'isHighest_TRADE_IN': bool``
- ``'isHighest_USED': bool``
- ``'isHighest_USED_ACCEPTABLE_SHIPPING': bool``
- ``'isHighest_USED_GOOD_SHIPPING': bool``
- ``'isHighest_USED_NEW_SHIPPING': bool``
- ``'isHighest_USED_VERY_GOOD_SHIPPING': bool``
- ``'isHighest_WAREHOUSE': bool``
- ``'isLowestOffer': bool``
- ``'isLowest_AMAZON': bool``
- ``'isLowest_BUY_BOX_SHIPPING': bool``
- ``'isLowest_COLLECTIBLE': bool``
- ``'isLowest_COUNT_COLLECTIBLE': bool``
- ``'isLowest_COUNT_NEW': bool``
- ``'isLowest_COUNT_REFURBISHED': bool``
- ``'isLowest_COUNT_REVIEWS': bool``
- ``'isLowest_COUNT_USED': bool``
- ``'isLowest_EBAY_NEW_SHIPPING': bool``
- ``'isLowest_EBAY_USED_SHIPPING': bool``
- ``'isLowest_LIGHTNING_DEAL': bool``
- ``'isLowest_LISTPRICE': bool``
- ``'isLowest_NEW': bool``
- ``'isLowest_NEW_FBA': bool``
- ``'isLowest_NEW_FBM_SHIPPING': bool``
- ``'isLowest_RATING': bool``
- ``'isLowest_REFURBISHED': bool``
- ``'isLowest_REFURBISHED_SHIPPING': bool``
- ``'isLowest_RENT': bool``
- ``'isLowest_SALES': bool``
- ``'isLowest_TRADE_IN': bool``
- ``'isLowest_USED': bool``
- ``'isLowest_USED_ACCEPTABLE_SHIPPING': bool``
- ``'isLowest_USED_GOOD_SHIPPING': bool``
- ``'isLowest_USED_NEW_SHIPPING': bool``
- ``'isLowest_USED_VERY_GOOD_SHIPPING': bool``
- ``'isLowest_WAREHOUSE': bool``
- ``'isPrimeExclusive': bool``
- ``'isSNS': bool``
- ``'label': str``
- ``'languages': str``
- ``'lastOffersUpdate_lte': int``
- ``'lastOffersUpdate_gte': int``
- ``'lastPriceChange_lte': int``
- ``'lastPriceChange_gte': int``
- ``'lastRatingUpdate_lte': int``
- ``'lastRatingUpdate_gte': int``
- ``'lastUpdate_lte': int``
- ``'lastUpdate_gte': int``
- ``'lightningEnd_lte': int``
- ``'lightningEnd_gte': int``
- ``'lightningStart_lte': int``
- ``'lightningStart_gte': int``
- ``'listedSince_lte': int``
- ``'listedSince_gte': int``
- ``'manufacturer': str``
- ``'model': str``
- ``'newPriceIsMAP': bool``
- ``'nextUpdate_lte': int``
- ``'nextUpdate_gte': int``
- ``'numberOfItems_lte': int``
- ``'numberOfItems_gte': int``
- ``'numberOfPages_lte': int``
- ``'numberOfPages_gte': int``
- ``'numberOfTrackings_lte': int``
- ``'numberOfTrackings_gte': int``
- ``'offerCountFBA_lte': int``
- ``'offerCountFBA_gte': int``
- ``'offerCountFBM_lte': int``
- ``'offerCountFBM_gte': int``
- ``'outOfStockPercentageInInterval_lte': int``
- ``'outOfStockPercentageInInterval_gte': int``
- ``'packageDimension_lte': int``
- ``'packageDimension_gte': int``
- ``'packageHeight_lte': int``
- ``'packageHeight_gte': int``
- ``'packageLength_lte': int``
- ``'packageLength_gte': int``
- ``'packageQuantity_lte': int``
- ``'packageQuantity_gte': int``
- ``'packageWeight_lte': int``
- ``'packageWeight_gte': int``
- ``'packageWidth_lte': int``
- ``'packageWidth_gte': int``
- ``'partNumber': str``
- ``'platform': str``
- ``'productGroup': str``
- ``'productType': int``
- ``'promotions': int``
- ``'publicationDate_lte': int``
- ``'publicationDate_gte': int``
- ``'publisher': str``
- ``'releaseDate_lte': int``
- ``'releaseDate_gte': int``
- ``'rootCategory': int``
- ``'sellerIds': str``
- ``'sellerIdsLowestFBA': str``
- ``'sellerIdsLowestFBM': str``
- ``'size': str``
- ``'salesRankDrops180_lte': int``
- ``'salesRankDrops180_gte': int``
- ``'salesRankDrops90_lte': int``
- ``'salesRankDrops90_gte': int``
- ``'salesRankDrops30_lte': int``
- ``'salesRankDrops30_gte': int``
- ``'stockAmazon_lte': int``
- ``'stockAmazon_gte': int``
- ``'stockBuyBox_lte': int``
- ``'stockBuyBox_gte': int``
- ``'studio': str``
- ``'title': str``
- ``'title_flag': str``
- ``'trackingSince_lte': int``
- ``'trackingSince_gte': int``
- ``'type': str``
- ``'mpn': str``
- ``'outOfStockPercentage90_lte': int``
- ``'outOfStockPercentage90_gte': int``
- ``'categories_include': int``
- ``'categories_exclude': int``
domain : str, optional
One of the following Amazon domains: RESERVED, US, GB, DE,
FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.
wait : bool, optional
Wait available token before doing effective query, Defaults to ``True``.
Examples
--------
Query for all of Jim Butcher's books
>>> import keepa
>>> api = keepa.AsyncKeepa('ENTER_ACTUAL_KEY_HERE')
>>> product_parms = {'author': 'jim butcher'}
>>> products = api.product_finder(product_parms)
"""
# verify valid keys
for key in product_parms:
if key not in PRODUCT_REQUEST_KEYS:
raise RuntimeError('Invalid key "%s"' % key)
# verify json type
key_type = PRODUCT_REQUEST_KEYS[key]
product_parms[key] = key_type(product_parms[key])
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'selection': json.dumps(product_parms)}
response = self._request('query', payload, wait=wait)
return response['asinList']
def deals(self, deal_parms, domain='US', wait=True):
"""Query the Keepa API for product deals.
You can find products that recently changed and match your
search criteria. A single request will return a maximum of
150 deals. Try out the deals page to first get accustomed to
the options:
https://keepa.com/#!deals
For more details please visit:
https://keepa.com/#!discuss/t/browsing-deals/338
Parameters
----------
deal_parms : dict
Dictionary containing one or more of the following keys:
- ``"page"``: int
- ``"domainId"``: int
- ``"excludeCategories"``: list
- ``"includeCategories"``: list
- ``"priceTypes"``: list
- ``"deltaRange"``: list
- ``"deltaPercentRange"``: list
- ``"deltaLastRange"``: list
- ``"salesRankRange"``: list
- ``"currentRange"``: list
- ``"minRating"``: int
- ``"isLowest"``: bool
- ``"isLowestOffer"``: bool
- ``"isOutOfStock"``: bool
- ``"titleSearch"``: String
- ``"isRangeEnabled"``: bool
- ``"isFilterEnabled"``: bool
- ``"hasReviews"``: bool
- ``"filterErotic"``: bool
- ``"sortType"``: int
- ``"dateRange"``: int
domain : str, optional
One of the following Amazon domains: RESERVED, US, GB, DE,
FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.
wait : bool, optional
Wait available token before doing effective query, Defaults to ``True``.
Examples
--------
>>> import keepa
>>> api = keepa.AsyncKeepa('ENTER_YOUR_KEY_HERE')
>>> deal_parms = {"page": 0,
"domainId": 1,
"excludeCategories": [1064954, 11091801],
"includeCategories": [16310101]}
>>> deals = api.deals(deal_parms)
>>> print(deals[:5])
['B00U20FN1Y', 'B078HR932T', 'B00L88ERK2',
'B07G5TDMZ7', 'B00GYMQAM0']
"""
# verify valid keys
for key in deal_parms:
if key not in DEAL_REQUEST_KEYS:
raise RuntimeError('Invalid key "%s"' % key)
# verify json type
key_type = DEAL_REQUEST_KEYS[key]
deal_parms[key] = key_type(deal_parms[key])
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'selection': json.dumps(deal_parms)}
response = self._request('query', payload, wait=wait)
return response['asinList']
    def _request(self, request_type, payload, wait=True, raw_response=False):
        """Queries keepa api server.
        Parses raw response from keepa into a json format. Handles
        errors and waits for available tokens if allowed.

        Parameters
        ----------
        request_type : str
            Endpoint name appended to the API base URL
            (e.g. ``'product'``, ``'token'``, ``'query'``).
        payload : dict
            Query string parameters; must contain the access key.
        wait : bool, optional
            When True, block until tokens are available before the
            request and retry on a 429 "not enough token" response.
        raw_response : bool, optional
            When True, return the ``requests.Response`` object instead
            of the decoded JSON dict.
        """
        if wait:
            self.wait_for_tokens()
        # Retry loop: only the 429/"wait" path loops; every other
        # non-200 status raises immediately.
        while True:
            raw = requests.get(f'https://api.keepa.com/{request_type}/?', payload,
                               timeout=self._timeout)
            status_code = str(raw.status_code)
            if status_code != '200':
                if status_code in SCODES:
                    # Out of tokens: optionally wait for a refill and retry.
                    if status_code == '429' and wait:
                        print('Response from server: %s' % SCODES[status_code])
                        self.wait_for_tokens()
                        continue
                    else:
                        raise Exception(SCODES[status_code])
                else:
                    raise Exception('REQUEST_FAILED')
            break
        response = raw.json()
        if 'tokensConsumed' in response:
            log.debug('%d tokens consumed', response['tokensConsumed'])
        # A 200 reply can still carry an application-level error object.
        if 'error' in response:
            if response['error']:
                raise Exception(response['error']['message'])
        # always update tokens
        self.tokens_left = response['tokensLeft']
        if raw_response:
            return raw
        return response
class AsyncKeepa():
    """Class to support an asynchronous Python interface to keepa server.
    Initializes API with access key. Access key can be obtained by
    signing up for a recurring or one time plan at:
    https://keepa.com/#!api
    Parameters
    ----------
    accesskey : str
        64 character access key string.
    timeout : float, optional
        Default timeout when issuing any request. This is not a time
        limit on the entire response download; rather, an exception is
        raised if the server has not issued a response for timeout
        seconds. Setting this to 0 disables the timeout, but will
        cause any request to hang indefinitely should keepa.com be down
    Examples
    --------
    Create the api object
    >>> import keepa
    >>> mykey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    >>> api = await keepa.AsyncKeepa.create(mykey)
    Request data from two ASINs
    >>> products = await api.query(['0439064872', '1426208081'])
    Print item details
    >>> print('Item 1')
    >>> print('\t ASIN: {:s}'.format(products[0]['asin']))
    >>> print('\t Title: {:s}'.format(products[0]['title']))
    Print item price
    >>> usedprice = products[0]['data']['MarketplaceUsed']
    >>> usedtimes = products[0]['data']['MarketplaceUsed_time']
    >>> print('\t Used price: ${:.2f}'.format(usedprice[-1]))
    >>> print('\t as of: {:s}'.format(str(usedtimes[-1])))
    """
    # Async factory instead of __init__ because the initial token-status
    # fetch must be awaited.
    @classmethod
    async def create(cls, accesskey, timeout=10):
        self = AsyncKeepa()
        self.accesskey = accesskey
        self.status = None
        self.tokens_left = 0
        self._timeout = timeout
        # Store user's available tokens
        log.info('Connecting to keepa using key ending in %s', accesskey[-6:])
        await self.update_status()
        log.info('%d tokens remain', self.tokens_left)
        return self
    @property
    def time_to_refill(self):
        """ Returns the time to refill in seconds """
        # Get current timestamp in milliseconds from UNIX epoch
        now = int(time.time() * 1000)
        timeatrefile = self.status['timestamp'] + self.status['refillIn']
        # wait plus one second fudge factor
        timetorefil = timeatrefile - now + 1000
        if timetorefil < 0:
            timetorefil = 0
        # Account for negative tokens left
        if self.tokens_left < 0:
            timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000
        # Return value in seconds
        return timetorefil / 1000.0
    async def update_status(self):
        """ Updates available tokens """
        self.status = await self._request('token', {'key': self.accesskey}, wait=False)
    async def wait_for_tokens(self):
        """Checks any remaining tokens and waits if none are available. """
        await self.update_status()
        # Wait if no tokens available
        if self.tokens_left <= 0:
            tdelay = self.time_to_refill
            log.warning('Waiting %.0f seconds for additional tokens' % tdelay)
            await asyncio.sleep(tdelay)
            await self.update_status()
    # NOTE: methods below take their docstrings from the synchronous
    # Keepa class via the is_documented_by decorator; a literal
    # docstring here would be overwritten by the decorator.
    @is_documented_by(Keepa.query)
    async def query(self, items, stats=None, domain='US', history=True,
                    offers=None, update=None, to_datetime=True,
                    rating=False, out_of_stock_as_nan=True, stock=False,
                    product_code_is_asin=True, progress_bar=True, buybox=False,
                    wait=True, days=None, only_live_offers=None, raw=False):
        if raw:
            raise ValueError('Raw response is only available in the non-async class')
        # Format items into numpy array
        try:
            items = format_items(items)
        except BaseException:
            raise Exception('Invalid product codes input')
        assert len(items), 'No valid product codes'
        nitems = len(items)
        if nitems == 1:
            log.debug('Executing single product query')
        else:
            log.debug('Executing %d item product query', nitems)
        # check offer input
        if offers:
            if not isinstance(offers, int):
                raise TypeError('Parameter "offers" must be an interger')
            if offers > 100 or offers < 20:
                raise ValueError('Parameter "offers" must be between 20 and 100')
        # Report time to completion
        tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (
            60000 - self.status['refillIn']) / 60000.0
        if tcomplete < 0.0:
            tcomplete = 0.5
        log.debug('Estimated time to complete %d request(s) is %.2f minutes',
                  nitems, tcomplete)
        log.debug('\twith a refill rate of %d token(s) per minute',
                  self.status['refillRate'])
        # product list
        products = []
        pbar = None
        if progress_bar:
            pbar = tqdm(total=nitems)
        # Number of requests is dependent on the number of items and
        # request limit. Use available tokens first
        idx = 0  # or number complete
        while idx < nitems:
            nrequest = nitems - idx
            # cap request
            if nrequest > REQUEST_LIMIT:
                nrequest = REQUEST_LIMIT
            # request from keepa and increment current position
            item_request = items[idx:idx + nrequest]
            response = await self._product_query(
                item_request,
                product_code_is_asin,
                stats=stats,
                domain=domain, stock=stock,
                offers=offers, update=update,
                history=history, rating=rating,
                to_datetime=to_datetime,
                out_of_stock_as_nan=out_of_stock_as_nan,
                buybox=buybox,
                wait=wait,
                days=days,
                only_live_offers=only_live_offers,
            )
            idx += nrequest
            products.extend(response['products'])
            if pbar is not None:
                pbar.update(nrequest)
        return products
    @is_documented_by(Keepa._product_query)
    async def _product_query(self, items, product_code_is_asin=True, **kwargs):
        # ASINs convert to comma joined string
        assert len(items) <= 100
        if product_code_is_asin:
            kwargs['asin'] = ','.join(items)
        else:
            kwargs['code'] = ','.join(items)
        kwargs['key'] = self.accesskey
        kwargs['domain'] = DCODES.index(kwargs['domain'])
        # Convert bool values to 0 and 1.
        kwargs['stock'] = int(kwargs['stock'])
        kwargs['history'] = int(kwargs['history'])
        kwargs['rating'] = int(kwargs['rating'])
        kwargs['buybox'] = int(kwargs['buybox'])
        # None-valued optional parameters are removed rather than sent.
        if kwargs['update'] is None:
            del kwargs['update']
        else:
            kwargs['update'] = int(kwargs['update'])
        if kwargs['offers'] is None:
            del kwargs['offers']
        else:
            kwargs['offers'] = int(kwargs['offers'])
        if kwargs['only_live_offers'] is None:
            del kwargs['only_live_offers']
        else:
            kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))
        # Keepa's param actually doesn't use snake_case.
        # I believe using snake case throughout the Keepa interface is better.
        if kwargs['days'] is None:
            del kwargs['days']
        else:
            assert kwargs['days'] > 0
        if kwargs['stats'] is None:
            del kwargs['stats']
        out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)
        to_datetime = kwargs.pop('to_datetime', True)
        # Query and replace csv with parsed data if history enabled
        wait = kwargs.get("wait")
        kwargs.pop("wait", None)
        response = await self._request('product', kwargs, wait=wait)
        if kwargs['history']:
            for product in response['products']:
                if product['csv']:  # if data exists
                    product['data'] = parse_csv(product['csv'],
                                                to_datetime,
                                                out_of_stock_as_nan)
        if kwargs.get('stats', None):
            for product in response['products']:
                stats = product.get('stats', None)
                if stats:
                    product['stats_parsed'] = _parse_stats(stats, to_datetime)
        return response
    @is_documented_by(Keepa.best_sellers_query)
    async def best_sellers_query(self, category, rank_avg_range=0,
                                 domain='US', wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'category': category,
                   'range': rank_avg_range}
        response = await self._request('bestsellers', payload, wait=wait)
        if 'bestSellersList' in response:
            return response['bestSellersList']['asinList']
        else:  # pragma: no cover
            log.info('Best sellers search results not yet available')
    @is_documented_by(Keepa.search_for_categories)
    async def search_for_categories(self, searchterm, domain='US', wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'type': 'category',
                   'term': searchterm}
        response = await self._request('search', payload, wait=wait)
        if response['categories'] == {}:  # pragma no cover
            raise Exception('Categories search results not yet available ' +
                            'or no search terms found.')
        else:
            return response['categories']
    @is_documented_by(Keepa.category_lookup)
    async def category_lookup(self, category_id, domain='US',
                              include_parents=0, wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'category': category_id,
                   'parents': include_parents}
        response = await self._request('category', payload, wait=wait)
        if response['categories'] == {}:  # pragma no cover
            raise Exception('Category lookup results not yet available or no' +
                            'match found.')
        else:
            return response['categories']
    @is_documented_by(Keepa.seller_query)
    async def seller_query(self, seller_id, domain='US', to_datetime=True,
                           storefront=False, update=None, wait=True):
        if isinstance(seller_id, list):
            if len(seller_id) > 100:
                err_str = 'seller_id can contain at maximum 100 sellers'
                raise RuntimeError(err_str)
            seller = ','.join(seller_id)
        else:
            seller = seller_id
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'seller': seller}
        if storefront:
            payload["storefront"] = int(storefront)
        if update:
            payload["update"] = update
        response = await self._request('seller', payload, wait=wait)
        return _parse_seller(response['sellers'], to_datetime)
    @is_documented_by(Keepa.product_finder)
    async def product_finder(self, product_parms, domain='US', wait=True):
        # verify valid keys
        for key in product_parms:
            if key not in PRODUCT_REQUEST_KEYS:
                raise RuntimeError('Invalid key "%s"' % key)
            # verify json type
            key_type = PRODUCT_REQUEST_KEYS[key]
            product_parms[key] = key_type(product_parms[key])
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'selection': json.dumps(product_parms)}
        response = await self._request('query', payload, wait=wait)
        return response['asinList']
    @is_documented_by(Keepa.deals)
    async def deals(self, deal_parms, domain='US', wait=True):
        # verify valid keys
        for key in deal_parms:
            if key not in DEAL_REQUEST_KEYS:
                raise RuntimeError('Invalid key "%s"' % key)
            # verify json type
            key_type = DEAL_REQUEST_KEYS[key]
            deal_parms[key] = key_type(deal_parms[key])
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'selection': json.dumps(deal_parms)}
        response = await self._request('query', payload, wait=wait)
        return response['asinList']
    async def _request(self, request_type, payload, wait=True):
        """Queries keepa api server. Parses raw response from keepa
        into a json format. Handles errors and waits for available
        tokens if allowed.
        """
        # Retry loop: only the 429/"wait" path loops; every other
        # non-200 status raises immediately.
        while True:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f'https://api.keepa.com/{request_type}/?', params=payload,
                    timeout=self._timeout
                ) as raw:
                    status_code = str(raw.status)
                    if status_code != '200':
                        if status_code in SCODES:
                            if status_code == '429' and wait:
                                await self.wait_for_tokens()
                                continue
                            else:
                                raise Exception(SCODES[status_code])
                        else:
                            raise Exception('REQUEST_FAILED')
                    response = await raw.json()
                    # NOTE(review): unlike Keepa._request, tokensConsumed
                    # is not logged here.
                    if 'error' in response:
                        if response['error']:
                            raise Exception(response['error']['message'])
                    # always update tokens
                    self.tokens_left = response['tokensLeft']
                    return response
            # NOTE(review): unreachable dead code — the return above
            # (or a raise/continue) always exits first.
            break
def convert_offer_history(csv, to_datetime=True):
    """Convert a raw keepa offer history into human readable arrays.

    Parameters
    ----------
    csv : list
        Offer list csv obtained from ``['offerCSV']``; flat triplets of
        (keepa minutes, price, shipping).
    to_datetime : bool, optional
        Modifies ``numpy`` minutes to ``datetime.datetime`` values.
        Default ``True``.

    Returns
    -------
    times : numpy.ndarray
        List of time values for an offer history.
    prices : numpy.ndarray
        Price (including shipping) of an offer for each time at an
        index of times.
    """
    # entries are laid out [time0, price0, ship0, time1, price1, ship1, ...]
    minute_stamps = csv[::3]
    cents = np.array(csv[1::3]) + np.array(csv[2::3])  # price plus shipping
    # convert keepa minutes to times and cents to currency units
    times = keepa_minutes_to_time(minute_stamps, to_datetime)
    prices = cents / 100.0
    return times, prices
def keepa_minutes_to_time(minutes, to_datetime=True):
    """Convert keepa minute stamps to a numpy datetime array.

    Keepa times are minutes elapsed since the keepa epoch
    (``KEEPA_ST_ORDINAL``).

    Parameters
    ----------
    minutes : array_like
        Minutes since the keepa epoch.
    to_datetime : bool, optional
        When True (default) return ``datetime.datetime`` objects,
        otherwise ``numpy.datetime64`` values.
    """
    # interpret the stamps as minute offsets and shift them onto the epoch
    offsets = np.array(minutes, dtype='timedelta64[m]')
    stamps = KEEPA_ST_ORDINAL + offsets
    if not to_datetime:
        return stamps
    return stamps.astype(datetime.datetime)
def run_and_get(coro):
    """Run a coroutine on an event loop and return its result.

    Reuses the thread's current event loop when one exists; otherwise a
    new loop is created and registered with ``asyncio.set_event_loop``.

    Parameters
    ----------
    coro : coroutine
        Awaitable to run to completion.

    Returns
    -------
    object
        Whatever the coroutine returns.
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # No usable loop in this thread: create one and make it current.
        # The original never registered the new loop, so every later
        # call hit RuntimeError again and leaked another fresh loop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    task = loop.create_task(coro)
    loop.run_until_complete(task)
    return task.result()
| 41.178585 | 115 | 0.562639 |
import requests
import asyncio
import datetime
import json
import logging
import time
from functools import wraps
import aiohttp
import numpy as np
import pandas as pd
from tqdm import tqdm
from keepa.query_keys import DEAL_REQUEST_KEYS, PRODUCT_REQUEST_KEYS
def is_documented_by(original):
    """Decorator factory that copies *original*'s docstring onto the
    decorated callable (used to share docs between sync/async APIs)."""
    def decorator(target):
        target.__doc__ = original.__doc__
        return target
    return decorator
# Module-level logger for the keepa interface.
log = logging.getLogger(__name__)
log.setLevel('DEBUG')
# Epoch of keepa timestamps: minutes elapsed since 2011-01-01.
KEEPA_ST_ORDINAL = np.datetime64('2011-01-01')
# Maximum number of product codes accepted per API request.
REQUEST_LIMIT = 100
# HTTP status codes returned by the keepa API mapped to error labels.
SCODES = {'400': 'REQUEST_REJECTED',
          '402': 'PAYMENT_REQUIRED',
          '405': 'METHOD_NOT_ALLOWED',
          '429': 'NOT_ENOUGH_TOKEN'}
# Amazon marketplace domains; the list position is the keepa domainId code.
DCODES = ['RESERVED', 'US', 'GB', 'DE', 'FR', 'JP', 'CA', 'CN', 'IT', 'ES',
          'IN', 'MX']
# [csv array index, price-type name, is-price flag] triplets.  The flag
# marks entries stored in cents that are divided by 100 when parsed.
csv_indices = [[0, 'AMAZON', True],
               [1, 'NEW', True],
               [2, 'USED', True],
               [3, 'SALES', False],
               [4, 'LISTPRICE', True],
               [5, 'COLLECTIBLE', True],
               [6, 'REFURBISHED', True],
               [7, 'NEW_FBM_SHIPPING', True],
               [8, 'LIGHTNING_DEAL', True],
               [9, 'WAREHOUSE', True],
               [10, 'NEW_FBA', True],
               [11, 'COUNT_NEW', False],
               [12, 'COUNT_USED', False],
               [13, 'COUNT_REFURBISHED', False],
               [14, 'CollectableOffers', False],
               [15, 'EXTRA_INFO_UPDATES', False],
               [16, 'RATING', True],
               [17, 'COUNT_REVIEWS', False],
               [18, 'BUY_BOX_SHIPPING', True],
               [19, 'USED_NEW_SHIPPING', True],
               [20, 'USED_VERY_GOOD_SHIPPING', True],
               [21, 'USED_GOOD_SHIPPING', True],
               [22, 'USED_ACCEPTABLE_SHIPPING', True],
               [23, 'COLLECTIBLE_NEW_SHIPPING', True],
               [24, 'COLLECTIBLE_VERY_GOOD_SHIPPING', True],
               [25, 'COLLECTIBLE_GOOD_SHIPPING', True],
               [26, 'COLLECTIBLE_ACCEPTABLE_SHIPPING', True],
               [27, 'REFURBISHED_SHIPPING', True],
               [28, 'EBAY_NEW_SHIPPING', True],
               [29, 'EBAY_USED_SHIPPING', True],
               [30, 'TRADE_IN', True],
               [31, 'RENT', False]]
def _parse_stats(stats, to_datetime):
    """Normalize a product's raw ``stats`` dict from the keepa API.

    Negative scalars (keepa's "no data" marker) are dropped, prices in
    cents are converted to currency units, and keepa-minute timestamps
    are converted via ``keepa_minutes_to_time``.
    """
    # Keys whose values need no numeric normalization; skipped here.
    stats_keys_parse_not_required = {
        'buyBoxSellerId',
        'sellerIdsLowestFBA',
        'sellerIdsLowestFBM',
        'buyBoxShippingCountry',
        'buyBoxAvailabilityMessage',
    }
    stats_parsed = {}
    for stat_key, stat_value in stats.items():
        if stat_key in stats_keys_parse_not_required:
            stat_value = None
        elif isinstance(stat_value, int) and stat_value < 0:
            # negative scalar means "no data"
            stat_value = None
        if stat_value is not None:
            if stat_key == 'lastOffersUpdate':
                # single keepa-minute timestamp
                stats_parsed[stat_key] = keepa_minutes_to_time([stat_value], to_datetime)[0]
            elif isinstance(stat_value, list) and len(stat_value) > 0:
                # per-price-type list indexed according to csv_indices
                stat_value_dict = {}
                # entries may be [time, value] pairs instead of plain scalars
                convert_time_in_value_pair = any(map(lambda v: v is not None and isinstance(v, list), stat_value))
                for ind, key, isfloat in csv_indices:
                    stat_value_item = stat_value[ind] if ind < len(stat_value) else None
                    # Re-defined per iteration so it closes over this
                    # row's key/isfloat.
                    def normalize_value(v):
                        # negative means "no data"
                        if v < 0:
                            return None
                        if isfloat:
                            v = float(v) / 100
                            if key == 'RATING':
                                # ratings are stored as value*10, not cents
                                v = v * 10
                        return v
                    if stat_value_item is not None:
                        if convert_time_in_value_pair:
                            stat_value_time, stat_value_item = stat_value_item
                            stat_value_item = normalize_value(stat_value_item)
                            if stat_value_item is not None:
                                stat_value_time = keepa_minutes_to_time([stat_value_time], to_datetime)[0]
                                stat_value_item = (stat_value_time, stat_value_item)
                        else:
                            stat_value_item = normalize_value(stat_value_item)
                        if stat_value_item is not None:
                            stat_value_dict[key] = stat_value_item
                if len(stat_value_dict) > 0:
                    stats_parsed[stat_key] = stat_value_dict
            else:
                # scalar stat: keep as-is
                stats_parsed[stat_key] = stat_value
    return stats_parsed
_seller_time_data_keys = ['trackedSince', 'lastUpdate']
def _parse_seller(seller_raw_response, to_datetime):
    """Convert raw seller records into a dict keyed by seller id.

    The keepa-minute timestamps named in ``_seller_time_data_keys`` are
    converted in place on each seller record.
    """
    sellers = list(seller_raw_response.values())
    for record in sellers:
        # replace each present minute-stamp field with a converted time
        for time_key in _seller_time_data_keys:
            raw_minutes = record.get(time_key, None)
            if raw_minutes is not None:
                record[time_key] = keepa_minutes_to_time([raw_minutes], to_datetime)[0]
    return {record['sellerId']: record for record in sellers}
def parse_csv(csv, to_datetime=True, out_of_stock_as_nan=True):
    """Parse the ``csv`` field of a keepa product into usable arrays.

    Parameters
    ----------
    csv : list
        Raw ``product['csv']`` list; one entry per price type, indexed
        according to ``csv_indices``.
    to_datetime : bool, optional
        Convert keepa minutes to ``datetime.datetime`` values.
        Default ``True``.
    out_of_stock_as_nan : bool, optional
        Replace negative (out of stock) prices with ``numpy.nan``.
        Default ``True``.

    Returns
    -------
    dict
        For each available price type ``KEY``: ``KEY`` (values),
        ``KEY_time`` (times) and ``df_KEY`` (a ``pandas.DataFrame``
        of the values indexed by time).
    """
    product_data = {}
    for ind, key, isfloat in csv_indices:
        if csv[ind]:  # check if entry exists
            if 'SHIPPING' in key:  # shipping price is included
                # Data goes [time0, value0, shipping0, time1, value1,
                # shipping1, ...]
                times = csv[ind][::3]
                values = np.array(csv[ind][1::3])
                values += np.array(csv[ind][2::3])
            else:
                # Data goes [time0, value0, time1, value1, ...]
                times = csv[ind][::2]
                values = np.array(csv[ind][1::2])
            # Convert to float price if applicable
            if isfloat:
                nan_mask = values < 0
                # BUGFIX: np.float was deprecated in NumPy 1.20 and
                # removed in 1.24; the builtin float is the same dtype.
                values = values.astype(float) / 100
                if out_of_stock_as_nan:
                    values[nan_mask] = np.nan
                if key == 'RATING':
                    # ratings are stored as value*10, not cents
                    values *= 10
            timeval = keepa_minutes_to_time(times, to_datetime)
            product_data['%s_time' % key] = timeval
            product_data[key] = values
            # combine time and value into a data frame using time as index
            product_data['df_%s' % key] = pd.DataFrame({'value': values}, index=timeval)
    return product_data
def format_items(items):
    """Coerce product codes into a unique numpy string array.

    Parameters
    ----------
    items : str, list, tuple, or numpy.ndarray
        A single product code or a collection of codes.

    Returns
    -------
    numpy.ndarray or None
        Unique product codes (sorted by ``np.unique``); a single
        string becomes a one-element array.  Unsupported input types
        fall through to ``None``, matching the original behavior.
    """
    if isinstance(items, str):
        # a single product code
        return np.asarray([items])
    # generalized: also accept tuples (previously only list/ndarray)
    if isinstance(items, (list, tuple, np.ndarray)):
        return np.unique(items)
class Keepa():
def __init__(self, accesskey, timeout=10):
self.accesskey = accesskey
self.status = None
self.tokens_left = 0
self._timeout = timeout
# Store user's available tokens
log.info('Connecting to keepa using key ending in %s', accesskey[-6:])
self.update_status()
log.info('%d tokens remain', self.tokens_left)
@property
def time_to_refill(self):
now = int(time.time() * 1000)
timeatrefile = self.status['timestamp'] + self.status['refillIn']
timetorefil = timeatrefile - now + 1000
if timetorefil < 0:
timetorefil = 0
if self.tokens_left < 0:
timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000
return timetorefil / 1000.0
def update_status(self):
self.status = self._request('token', {'key': self.accesskey}, wait=False)
def wait_for_tokens(self):
self.update_status()
if self.tokens_left <= 0:
tdelay = self.time_to_refill
log.warning('Waiting %.0f seconds for additional tokens' % tdelay)
time.sleep(tdelay)
self.update_status()
def query(self, items, stats=None, domain='US', history=True,
offers=None, update=None, to_datetime=True,
rating=False, out_of_stock_as_nan=True, stock=False,
product_code_is_asin=True, progress_bar=True, buybox=False,
wait=True, days=None, only_live_offers=None, raw=False):
try:
items = format_items(items)
except BaseException:
raise Exception('Invalid product codes input')
assert len(items), 'No valid product codes'
nitems = len(items)
if nitems == 1:
log.debug('Executing single product query')
else:
log.debug('Executing %d item product query', nitems)
if offers:
if not isinstance(offers, int):
raise TypeError('Parameter "offers" must be an interger')
if offers > 100 or offers < 20:
raise ValueError('Parameter "offers" must be between 20 and 100')
tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (
60000 - self.status['refillIn']) / 60000.0
if tcomplete < 0.0:
tcomplete = 0.5
log.debug('Estimated time to complete %d request(s) is %.2f minutes',
nitems, tcomplete)
log.debug('\twith a refill rate of %d token(s) per minute',
self.status['refillRate'])
products = []
pbar = None
if progress_bar:
pbar = tqdm(total=nitems)
idx = 0
while idx < nitems:
nrequest = nitems - idx
if nrequest > REQUEST_LIMIT:
nrequest = REQUEST_LIMIT
item_request = items[idx:idx + nrequest]
response = self._product_query(
item_request,
product_code_is_asin,
stats=stats,
domain=domain, stock=stock,
offers=offers, update=update,
history=history, rating=rating,
to_datetime=to_datetime,
out_of_stock_as_nan=out_of_stock_as_nan,
buybox=buybox,
wait=wait,
days=days,
only_live_offers=only_live_offers,
raw=raw,
)
idx += nrequest
if raw:
products.append(response)
else:
products.extend(response['products'])
if pbar is not None:
pbar.update(nrequest)
return products
def _product_query(self, items, product_code_is_asin=True, **kwargs):
assert len(items) <= 100
if product_code_is_asin:
kwargs['asin'] = ','.join(items)
else:
kwargs['code'] = ','.join(items)
kwargs['key'] = self.accesskey
kwargs['domain'] = DCODES.index(kwargs['domain'])
kwargs['stock'] = int(kwargs['stock'])
kwargs['history'] = int(kwargs['history'])
kwargs['rating'] = int(kwargs['rating'])
kwargs['buybox'] = int(kwargs['buybox'])
if kwargs['update'] is None:
del kwargs['update']
else:
kwargs['update'] = int(kwargs['update'])
if kwargs['offers'] is None:
del kwargs['offers']
else:
kwargs['offers'] = int(kwargs['offers'])
if kwargs['only_live_offers'] is None:
del kwargs['only_live_offers']
else:
kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))
if kwargs['days'] is None:
del kwargs['days']
else:
assert kwargs['days'] > 0
if kwargs['stats'] is None:
del kwargs['stats']
out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)
to_datetime = kwargs.pop('to_datetime', True)
wait = kwargs.get("wait")
kwargs.pop("wait", None)
raw_response = kwargs.pop('raw', False)
response = self._request('product', kwargs, wait=wait,
raw_response=raw_response)
if kwargs['history'] and not raw_response:
for product in response['products']:
if product['csv']:
product['data'] = parse_csv(product['csv'],
to_datetime,
out_of_stock_as_nan)
if kwargs.get('stats', None) and not raw_response:
for product in response['products']:
stats = product.get('stats', None)
if stats:
product['stats_parsed'] = _parse_stats(stats, to_datetime)
return response
def best_sellers_query(self, category, rank_avg_range=0, domain='US', wait=True):
assert domain in DCODES, 'Invalid domain code'
payload = {'key': self.accesskey,
'domain': DCODES.index(domain),
'category': category,
'range': rank_avg_range}
response = self._request('bestsellers', payload, wait=wait)
if 'bestSellersList' in response:
return response['bestSellersList']['asinList']
else:
log.info('Best sellers search results not yet available')
def search_for_categories(self, searchterm, domain='US', wait=True):
    """Search Keepa categories matching *searchterm* and return them.

    Raises when the API returns an empty category mapping.
    """
    assert domain in DCODES, 'Invalid domain code'
    payload = {
        'key': self.accesskey,
        'domain': DCODES.index(domain),
        'type': 'category',
        'term': searchterm,
    }
    categories = self._request('search', payload, wait=wait)['categories']
    if categories == {}:
        raise Exception('Categories search results not yet available ' +
                        'or no search terms found.')
    return categories
def category_lookup(self, category_id, domain='US',
                    include_parents=0, wait=True):
    """Look up a Keepa category by numeric id.

    Parameters mirror the Keepa ``category`` endpoint;
    ``include_parents`` is forwarded as the ``parents`` flag.
    Raises when the API returns an empty category mapping.
    """
    assert domain in DCODES, 'Invalid domain code'
    payload = {'key': self.accesskey,
               'domain': DCODES.index(domain),
               'category': category_id,
               'parents': include_parents}
    response = self._request('category', payload, wait=wait)
    if response['categories'] == {}:
        # BUG FIX: the two joined literals previously rendered as
        # "...or nomatch found." (missing space between them).
        raise Exception('Category lookup results not yet available or no '
                        'match found.')
    else:
        return response['categories']
def seller_query(self, seller_id, domain='US', to_datetime=True,
                 storefront=False, update=None, wait=True):
    """Query seller information for one seller id or a list of up to 100.

    Returns the parsed seller mapping from ``_parse_seller``.
    """
    # Consistency fix: every other query method validates the domain code
    # before indexing DCODES; this one previously let DCODES.index raise
    # a bare ValueError for bad input.
    assert domain in DCODES, 'Invalid domain code'
    if isinstance(seller_id, list):
        if len(seller_id) > 100:
            raise RuntimeError('seller_id can contain at maximum 100 sellers')
        seller = ','.join(seller_id)
    else:
        seller = seller_id
    payload = {'key': self.accesskey,
               'domain': DCODES.index(domain),
               'seller': seller}
    if storefront:
        payload["storefront"] = int(storefront)
    if update:
        payload["update"] = update
    response = self._request('seller', payload, wait=wait)
    return _parse_seller(response['sellers'], to_datetime)
def product_finder(self, product_parms, domain='US', wait=True):
    """Run a Keepa product-finder query and return the matching ASIN list.

    Each key in *product_parms* is validated against
    ``PRODUCT_REQUEST_KEYS`` and coerced to its declared type in place.
    """
    for key in product_parms:
        if key not in PRODUCT_REQUEST_KEYS:
            raise RuntimeError('Invalid key "%s"' % key)
        # Coerce the value to the type the API schema declares.
        product_parms[key] = PRODUCT_REQUEST_KEYS[key](product_parms[key])
    payload = {
        'key': self.accesskey,
        'domain': DCODES.index(domain),
        'selection': json.dumps(product_parms),
    }
    return self._request('query', payload, wait=wait)['asinList']
def deals(self, deal_parms, domain='US', wait=True):
    """Run a Keepa deals query and return the matching ASIN list.

    Each key in *deal_parms* is validated against ``DEAL_REQUEST_KEYS``
    and coerced to its declared type in place.
    """
    for key in deal_parms:
        if key not in DEAL_REQUEST_KEYS:
            raise RuntimeError('Invalid key "%s"' % key)
        # Coerce the value to the type the API schema declares.
        deal_parms[key] = DEAL_REQUEST_KEYS[key](deal_parms[key])
    payload = {
        'key': self.accesskey,
        'domain': DCODES.index(domain),
        'selection': json.dumps(deal_parms),
    }
    return self._request('query', payload, wait=wait)['asinList']
def _request(self, request_type, payload, wait=True, raw_response=False):
    """Issue one HTTP request against the Keepa API.

    Retries indefinitely on 429 (out of tokens) when *wait* is True,
    raises for any other non-200 status, and keeps ``self.tokens_left``
    up to date from the response.
    """
    if wait:
        self.wait_for_tokens()
    while True:
        raw = requests.get(f'https://api.keepa.com/{request_type}/?',
                           payload, timeout=self._timeout)
        status_code = str(raw.status_code)
        if status_code == '200':
            break
        if status_code not in SCODES:
            raise Exception('REQUEST_FAILED')
        if status_code == '429' and wait:
            # Out of tokens: report, block until refill, then retry.
            print('Response from server: %s' % SCODES[status_code])
            self.wait_for_tokens()
            continue
        raise Exception(SCODES[status_code])
    response = raw.json()
    if 'tokensConsumed' in response:
        log.debug('%d tokens consumed', response['tokensConsumed'])
    if 'error' in response:
        if response['error']:
            raise Exception(response['error']['message'])
    # Always track the remaining token balance.
    self.tokens_left = response['tokensLeft']
    if raw_response:
        return raw
    return response
class AsyncKeepa():
    """Asyncio flavor of the ``Keepa`` API interface.

    Instances must be built through the :meth:`create` coroutine (an
    ``__init__`` cannot await the initial status request).  Query
    methods mirror the synchronous ``Keepa`` class and share its
    docstrings via ``is_documented_by``.

    Fixes applied relative to the previous revision:
    - removed unreachable ``break`` after ``return`` in ``_request``;
    - ``category_lookup`` error message no longer reads "nomatch found.";
    - "interger" typo fixed in the ``offers`` type error message.
    """

    @classmethod
    async def create(cls, accesskey, timeout=10):
        """Async factory: construct, connect, and return an instance."""
        self = AsyncKeepa()
        self.accesskey = accesskey
        self.status = None       # last /token endpoint response
        self.tokens_left = 0     # request tokens currently available
        self._timeout = timeout
        log.info('Connecting to keepa using key ending in %s', accesskey[-6:])
        await self.update_status()
        log.info('%d tokens remain', self.tokens_left)
        return self

    @property
    def time_to_refill(self):
        """Seconds to wait until the API token bucket refills."""
        # Current timestamp in milliseconds from the UNIX epoch.
        now = int(time.time() * 1000)
        timeatrefile = self.status['timestamp'] + self.status['refillIn']
        # Wait, plus a one second fudge factor.
        timetorefil = timeatrefile - now + 1000
        if timetorefil < 0:
            timetorefil = 0
        # Account for a negative token balance.
        if self.tokens_left < 0:
            timetorefil += (abs(self.tokens_left) /
                            self.status['refillRate']) * 60000
        # Return value in seconds.
        return timetorefil / 1000.0

    async def update_status(self):
        """Refresh ``self.status`` from the /token endpoint."""
        self.status = await self._request('token', {'key': self.accesskey},
                                          wait=False)

    async def wait_for_tokens(self):
        """Sleep until at least one API token should be available."""
        await self.update_status()
        # Wait only when no tokens are available.
        if self.tokens_left <= 0:
            tdelay = self.time_to_refill
            log.warning('Waiting %.0f seconds for additional tokens' % tdelay)
            await asyncio.sleep(tdelay)
            await self.update_status()

    @is_documented_by(Keepa.query)
    async def query(self, items, stats=None, domain='US', history=True,
                    offers=None, update=None, to_datetime=True,
                    rating=False, out_of_stock_as_nan=True, stock=False,
                    product_code_is_asin=True, progress_bar=True, buybox=False,
                    wait=True, days=None, only_live_offers=None, raw=False):
        if raw:
            raise ValueError('Raw response is only available in the non-async class')
        # Format items into a numpy array of product codes.
        try:
            items = format_items(items)
        except BaseException:
            raise Exception('Invalid product codes input')
        assert len(items), 'No valid product codes'
        nitems = len(items)
        if nitems == 1:
            log.debug('Executing single product query')
        else:
            log.debug('Executing %d item product query', nitems)
        # Validate the offers parameter before spending any tokens.
        if offers:
            if not isinstance(offers, int):
                raise TypeError('Parameter "offers" must be an integer')
            if offers > 100 or offers < 20:
                raise ValueError('Parameter "offers" must be between 20 and 100')
        # Report the estimated time to completion.
        tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (
            60000 - self.status['refillIn']) / 60000.0
        if tcomplete < 0.0:
            tcomplete = 0.5
        log.debug('Estimated time to complete %d request(s) is %.2f minutes',
                  nitems, tcomplete)
        log.debug('\twith a refill rate of %d token(s) per minute',
                  self.status['refillRate'])
        products = []
        pbar = None
        if progress_bar:
            pbar = tqdm(total=nitems)
        # Fetch in batches; each request is capped at REQUEST_LIMIT items.
        idx = 0  # number of items completed so far
        while idx < nitems:
            nrequest = min(nitems - idx, REQUEST_LIMIT)
            item_request = items[idx:idx + nrequest]
            response = await self._product_query(
                item_request,
                product_code_is_asin,
                stats=stats,
                domain=domain, stock=stock,
                offers=offers, update=update,
                history=history, rating=rating,
                to_datetime=to_datetime,
                out_of_stock_as_nan=out_of_stock_as_nan,
                buybox=buybox,
                wait=wait,
                days=days,
                only_live_offers=only_live_offers,
            )
            idx += nrequest
            products.extend(response['products'])
            if pbar is not None:
                pbar.update(nrequest)
        return products

    @is_documented_by(Keepa._product_query)
    async def _product_query(self, items, product_code_is_asin=True, **kwargs):
        assert len(items) <= 100
        # Items are sent as a comma-joined list of ASINs or product codes.
        if product_code_is_asin:
            kwargs['asin'] = ','.join(items)
        else:
            kwargs['code'] = ','.join(items)
        kwargs['key'] = self.accesskey
        kwargs['domain'] = DCODES.index(kwargs['domain'])
        # Boolean flags go over the wire as 0/1.
        kwargs['stock'] = int(kwargs['stock'])
        kwargs['history'] = int(kwargs['history'])
        kwargs['rating'] = int(kwargs['rating'])
        kwargs['buybox'] = int(kwargs['buybox'])
        if kwargs['update'] is None:
            del kwargs['update']
        else:
            kwargs['update'] = int(kwargs['update'])
        if kwargs['offers'] is None:
            del kwargs['offers']
        else:
            kwargs['offers'] = int(kwargs['offers'])
        if kwargs['only_live_offers'] is None:
            del kwargs['only_live_offers']
        else:
            # Keepa's actual query parameter is hyphenated, not snake_case.
            kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))
        if kwargs['days'] is None:
            del kwargs['days']
        else:
            assert kwargs['days'] > 0
        if kwargs['stats'] is None:
            del kwargs['stats']
        # Local-only options must be removed before the payload is sent.
        out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)
        to_datetime = kwargs.pop('to_datetime', True)
        wait = kwargs.pop('wait', None)
        response = await self._request('product', kwargs, wait=wait)
        # Replace raw csv history with parsed data when history was requested.
        if kwargs['history']:
            for product in response['products']:
                if product['csv']:  # data exists
                    product['data'] = parse_csv(product['csv'],
                                                to_datetime,
                                                out_of_stock_as_nan)
        if kwargs.get('stats', None):
            for product in response['products']:
                stats = product.get('stats', None)
                if stats:
                    product['stats_parsed'] = _parse_stats(stats, to_datetime)
        return response

    @is_documented_by(Keepa.best_sellers_query)
    async def best_sellers_query(self, category, rank_avg_range=0,
                                 domain='US', wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'category': category,
                   'range': rank_avg_range}
        response = await self._request('bestsellers', payload, wait=wait)
        if 'bestSellersList' in response:
            return response['bestSellersList']['asinList']
        else:  # pragma: no cover
            log.info('Best sellers search results not yet available')

    @is_documented_by(Keepa.search_for_categories)
    async def search_for_categories(self, searchterm, domain='US', wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'type': 'category',
                   'term': searchterm}
        response = await self._request('search', payload, wait=wait)
        if response['categories'] == {}:  # pragma: no cover
            raise Exception('Categories search results not yet available ' +
                            'or no search terms found.')
        else:
            return response['categories']

    @is_documented_by(Keepa.category_lookup)
    async def category_lookup(self, category_id, domain='US',
                              include_parents=0, wait=True):
        assert domain in DCODES, 'Invalid domain code'
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'category': category_id,
                   'parents': include_parents}
        response = await self._request('category', payload, wait=wait)
        if response['categories'] == {}:  # pragma: no cover
            # BUG FIX: previously rendered as "...or nomatch found."
            raise Exception('Category lookup results not yet available or no '
                            'match found.')
        else:
            return response['categories']

    @is_documented_by(Keepa.seller_query)
    async def seller_query(self, seller_id, domain='US', to_datetime=True,
                           storefront=False, update=None, wait=True):
        if isinstance(seller_id, list):
            if len(seller_id) > 100:
                raise RuntimeError('seller_id can contain at maximum 100 sellers')
            seller = ','.join(seller_id)
        else:
            seller = seller_id
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'seller': seller}
        if storefront:
            payload["storefront"] = int(storefront)
        if update:
            payload["update"] = update
        response = await self._request('seller', payload, wait=wait)
        return _parse_seller(response['sellers'], to_datetime)

    @is_documented_by(Keepa.product_finder)
    async def product_finder(self, product_parms, domain='US', wait=True):
        # Validate keys and coerce values to their declared JSON types.
        for key in product_parms:
            if key not in PRODUCT_REQUEST_KEYS:
                raise RuntimeError('Invalid key "%s"' % key)
            key_type = PRODUCT_REQUEST_KEYS[key]
            product_parms[key] = key_type(product_parms[key])
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'selection': json.dumps(product_parms)}
        response = await self._request('query', payload, wait=wait)
        return response['asinList']

    @is_documented_by(Keepa.deals)
    async def deals(self, deal_parms, domain='US', wait=True):
        # Validate keys and coerce values to their declared JSON types.
        for key in deal_parms:
            if key not in DEAL_REQUEST_KEYS:
                raise RuntimeError('Invalid key "%s"' % key)
            key_type = DEAL_REQUEST_KEYS[key]
            deal_parms[key] = key_type(deal_parms[key])
        payload = {'key': self.accesskey,
                   'domain': DCODES.index(domain),
                   'selection': json.dumps(deal_parms)}
        response = await self._request('query', payload, wait=wait)
        return response['asinList']

    async def _request(self, request_type, payload, wait=True):
        """Issue one async HTTP request against the Keepa API.

        Retries indefinitely on 429 (out of tokens) when *wait* is True
        and raises for any other non-200 status.
        """
        while True:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f'https://api.keepa.com/{request_type}/?', params=payload,
                    timeout=self._timeout
                ) as raw:
                    status_code = str(raw.status)
                    if status_code != '200':
                        if status_code in SCODES:
                            if status_code == '429' and wait:
                                # Out of tokens: block until refill, retry.
                                await self.wait_for_tokens()
                                continue
                            else:
                                raise Exception(SCODES[status_code])
                        else:
                            raise Exception('REQUEST_FAILED')
                    response = await raw.json()
                    if 'error' in response:
                        if response['error']:
                            raise Exception(response['error']['message'])
                    # Always track the remaining token balance.
                    self.tokens_left = response['tokensLeft']
                    return response
                    # NOTE: an unreachable `break` after this return was removed.
def convert_offer_history(csv, to_datetime=True):
    """Split a flat offer-history list into ``(times, prices)`` arrays.

    The input interleaves triples of (keepa minutes, price cents,
    shipping cents); prices returned are dollars including shipping.
    """
    raw_times = csv[::3]
    # Price plus shipping, both stored in cents.
    cents = np.array(csv[1::3]) + np.array(csv[2::3])
    times = keepa_minutes_to_time(raw_times, to_datetime)
    return times, cents / 100.0
def keepa_minutes_to_time(minutes, to_datetime=True):
    """Convert keepa minute offsets to numpy datetimes (or timedeltas).

    Keepa timestamps are minutes since the keepa epoch; shifting by
    ``KEEPA_ST_ORDINAL`` yields absolute datetime64 values.
    """
    offsets = np.array(minutes, dtype='timedelta64[m]')
    shifted = offsets + KEEPA_ST_ORDINAL  # shift from the keepa ordinal
    if not to_datetime:
        return shifted
    return shifted.astype(datetime.datetime)
def run_and_get(coro):
    """Run coroutine *coro* to completion on an event loop and return its result.

    Reuses the current event loop when one exists; otherwise creates one.
    BUG FIX: a newly created loop is now registered with
    ``asyncio.set_event_loop`` so code inside the coroutine that calls
    ``asyncio.get_event_loop()`` finds the loop actually running it
    (previously each call leaked a fresh, unregistered loop).
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    task = loop.create_task(coro)
    loop.run_until_complete(task)
    return task.result()
| true | true |
f7330032a0c00b8ed90a653c8a08895ffa52cf4e | 5,999 | py | Python | mpc/kmpc_casadi/utility/casadi-example_pack-v3.4.4/python/vdp_collocation2.py | se-hwan/MIT_Driverless | 7674b29887ba518c134cfba805432f9c98f92270 | [
"MIT"
] | 2 | 2021-03-22T08:50:29.000Z | 2021-08-18T03:04:18.000Z | flip-optimization-master/casadi/ex/casadi-example_pack-v3.4.0/python/vdp_collocation2.py | WatsonZhouAnda/Cheetah-Software | 05e416fb26f968300826f0deb0953be9afb22bfe | [
"MIT"
] | null | null | null | flip-optimization-master/casadi/ex/casadi-example_pack-v3.4.0/python/vdp_collocation2.py | WatsonZhouAnda/Cheetah-Software | 05e416fb26f968300826f0deb0953be9afb22bfe | [
"MIT"
] | 2 | 2022-01-14T04:28:41.000Z | 2022-01-14T05:29:01.000Z | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
from casadi.tools import *
import numpy as NP
import matplotlib.pyplot as plt
nk = 20    # Control discretization (number of finite elements)
tf = 10.0  # End time

# Declare variables (use scalar graph)
t = SX.sym("t")    # time
u = SX.sym("u")    # control

# State vector: the two Van der Pol oscillator states plus a quadrature
# state L accumulating the Lagrange (running-cost) integrand.
states = struct_symSX([
    entry('x',shape=2), # vdp oscillator states
    entry('L') # helper state: Lagrange integrand
])

# Create a structure for the right hand side
rhs = struct_SX(states)
x = states['x']
rhs["x"] = vertcat((1 - x[1]*x[1])*x[0] - x[1] + u, x[0])
rhs["L"] = x[0]*x[0] + x[1]*x[1] + u*u

# ODE right hand side function
f = Function('f', [t,states,u],[rhs])

# Objective function (Mayer term): read off the accumulated cost state L.
m = Function('m', [t,states,u],[states["L"]])

# Control bounds
u_min = -0.75
u_max = 1.0
u_init = 0.0
u_lb = NP.array([u_min])
u_ub = NP.array([u_max])
u_init = NP.array([u_init])

# State bounds and initial guess (path, initial-time, and final-time bounds)
x_min = [-inf, -inf, -inf]
x_max = [ inf, inf, inf]
xi_min = [ 0.0, 1.0, 0.0]
xi_max = [ 0.0, 1.0, 0.0]
xf_min = [ 0.0, 0.0, -inf]
xf_max = [ 0.0, 0.0, inf]
x_init = [ 0.0, 0.0, 0.0]

# Dimensions
nx = 3
nu = 1

# Choose collocation points (Radau scheme of degree 3, plus the element start)
tau_root = [0] + collocation_points(3,"radau")

# Degree of interpolating polynomial
d = len(tau_root)-1

# Size of the finite elements
h = tf/nk

# Coefficients of the collocation equation
C = NP.zeros((d+1,d+1))

# Coefficients of the continuity equation
D = NP.zeros(d+1)

# Dimensionless time inside one control interval
tau = SX.sym("tau")

# All collocation time points
T = NP.zeros((nk,d+1))
for k in range(nk):
    for j in range(d+1):
        T[k,j] = h*(k + tau_root[j])

# For all collocation points
for j in range(d+1):
    # Construct the Lagrange polynomial for the polynomial basis at this
    # collocation point.
    L = 1
    for r in range(d+1):
        if r != j:
            L *= (tau-tau_root[r])/(tau_root[j]-tau_root[r])
    # Evaluate at the element end time to get the continuity coefficients.
    lfcn = Function('lfcn', [tau],[L])
    D[j] = lfcn(1.0)
    # Evaluate the time derivative at all collocation points to get the
    # collocation-equation coefficients.
    tfcn = Function('tfcn', [tau],[tangent(L,tau)])
    for r in range(d+1):
        C[j,r] = tfcn(tau_root[r])

# Structure holding the NLP decision variables: state at every collocation
# point of every element, plus one control per element.
V = struct_symMX([
    (
        entry("X",repeat=[nk+1,d+1],struct=states),
        entry("U",repeat=[nk],shape=nu)
    )
])

vars_lb = V()
vars_ub = V()
vars_init = V()

# Set states and their bounds
vars_init["X",:,:] = repeated(repeated(x_init))
vars_lb["X",:,:] = repeated(repeated(x_min))
vars_ub["X",:,:] = repeated(repeated(x_max))

# Set controls and their bounds
vars_init["U",:] = repeated(u_init)
vars_lb["U",:] = repeated(u_min)
vars_ub["U",:] = repeated(u_max)

# State at initial time
vars_lb["X",0,0] = xi_min
vars_ub["X",0,0] = xi_max

# State at end time
vars_lb["X",-1,0] = xf_min
vars_ub["X",-1,0] = xf_max

# Constraint function for the NLP
g = []
lbg = []
ubg = []

# For all finite elements
for k in range(nk):
    # For all collocation points
    for j in range(1,d+1):
        # Expression for the state derivative at this collocation point
        xp_jk = 0
        for r in range (d+1):
            xp_jk += C[r,j]*V["X",k,r]
        # Add the collocation equation to the NLP
        fk = f(T[k][j], V["X",k,j], V["U",k])
        g.append(h*fk - xp_jk)
        lbg.append(NP.zeros(nx)) # equality constraints
        ubg.append(NP.zeros(nx)) # equality constraints
    # Expression for the state at the end of the finite element
    xf_k = 0
    for r in range(d+1):
        xf_k += D[r]*V["X",k,r]
    # Add the continuity equation to the NLP
    g.append(V["X",k+1,0] - xf_k)
    lbg.append(NP.zeros(nx))
    ubg.append(NP.zeros(nx))

# Concatenate constraints
g = vertcat(*g)

# Objective function (note: rebinds the name f, shadowing the ODE Function)
f = m(T[nk-1][d],V["X",nk,0],V["U",nk-1])

# NLP
nlp = {'x':V, 'f':f, 'g':g}

## ----
## SOLVE THE NLP
## ----

# Set options
opts = {}
opts["expand"] = True
#opts["ipopt.max_iter"] = 4
# NOTE: 'ma27' requires the HSL linear solvers to be installed with Ipopt.
opts["ipopt.linear_solver"] = 'ma27'

# Allocate an NLP solver
solver = nlpsol("solver", "ipopt", nlp, opts)
arg = {}

# Initial condition
arg["x0"] = vars_init

# Bounds on x
arg["lbx"] = vars_lb
arg["ubx"] = vars_ub

# Bounds on g
arg["lbg"] = NP.concatenate(lbg)
arg["ubg"] = NP.concatenate(ubg)

# Solve the problem
res = solver(**arg)

# Print the optimal cost
print("optimal cost: ", float(res["f"]))

# Retrieve the solution
opt = V(res["x"])

# Get values at the beginning of each finite element
x0_opt = opt["X",:,0,"x",0]
x1_opt = opt["X",:,0,"x",1]
x2_opt = opt["X",:,0,"L"]
u_opt = opt["U",:,0]
tgrid = NP.linspace(0,tf,nk+1)
tgrid_u = NP.linspace(0,tf,nk)

# Plot the results
plt.figure(1)
plt.clf()
plt.plot(tgrid,x0_opt,'--')
plt.plot(tgrid,x1_opt,'-.')
plt.step(tgrid_u,u_opt,'-')
plt.title("Van der Pol optimization")
plt.xlabel('time')
plt.legend(['x[0] trajectory','x[1] trajectory','u trajectory'])
plt.grid()
plt.show()
| 24.586066 | 127 | 0.635939 |
from casadi import *
from casadi.tools import *
import numpy as NP
import matplotlib.pyplot as plt
nk = 20
tf = 10.0
t = SX.sym("t")
u = SX.sym("u")
states = struct_symSX([
entry('x',shape=2),
entry('L')
])
rhs = struct_SX(states)
x = states['x']
rhs["x"] = vertcat((1 - x[1]*x[1])*x[0] - x[1] + u, x[0])
rhs["L"] = x[0]*x[0] + x[1]*x[1] + u*u
f = Function('f', [t,states,u],[rhs])
m = Function('m', [t,states,u],[states["L"]])
u_min = -0.75
u_max = 1.0
u_init = 0.0
u_lb = NP.array([u_min])
u_ub = NP.array([u_max])
u_init = NP.array([u_init])
x_min = [-inf, -inf, -inf]
x_max = [ inf, inf, inf]
xi_min = [ 0.0, 1.0, 0.0]
xi_max = [ 0.0, 1.0, 0.0]
xf_min = [ 0.0, 0.0, -inf]
xf_max = [ 0.0, 0.0, inf]
x_init = [ 0.0, 0.0, 0.0]
nx = 3
nu = 1
tau_root = [0] + collocation_points(3,"radau")
d = len(tau_root)-1
h = tf/nk
C = NP.zeros((d+1,d+1))
D = NP.zeros(d+1)
tau = SX.sym("tau")
T = NP.zeros((nk,d+1))
for k in range(nk):
for j in range(d+1):
T[k,j] = h*(k + tau_root[j])
for j in range(d+1):
L = 1
for r in range(d+1):
if r != j:
L *= (tau-tau_root[r])/(tau_root[j]-tau_root[r])
lfcn = Function('lfcn', [tau],[L])
D[j] = lfcn(1.0)
tfcn = Function('tfcn', [tau],[tangent(L,tau)])
for r in range(d+1):
C[j,r] = tfcn(tau_root[r])
V = struct_symMX([
(
entry("X",repeat=[nk+1,d+1],struct=states),
entry("U",repeat=[nk],shape=nu)
)
])
vars_lb = V()
vars_ub = V()
vars_init = V()
vars_init["X",:,:] = repeated(repeated(x_init))
vars_lb["X",:,:] = repeated(repeated(x_min))
vars_ub["X",:,:] = repeated(repeated(x_max))
vars_init["U",:] = repeated(u_init)
vars_lb["U",:] = repeated(u_min)
vars_ub["U",:] = repeated(u_max)
vars_lb["X",0,0] = xi_min
vars_ub["X",0,0] = xi_max
vars_lb["X",-1,0] = xf_min
vars_ub["X",-1,0] = xf_max
g = []
lbg = []
ubg = []
for k in range(nk):
for j in range(1,d+1):
xp_jk = 0
for r in range (d+1):
xp_jk += C[r,j]*V["X",k,r]
fk = f(T[k][j], V["X",k,j], V["U",k])
g.append(h*fk - xp_jk)
lbg.append(NP.zeros(nx))
ubg.append(NP.zeros(nx))
xf_k = 0
for r in range(d+1):
xf_k += D[r]*V["X",k,r]
g.append(V["X",k+1,0] - xf_k)
lbg.append(NP.zeros(nx))
ubg.append(NP.zeros(nx))
g = vertcat(*g)
f = m(T[nk-1][d],V["X",nk,0],V["U",nk-1])
nlp = {'x':V, 'f':f, 'g':g}
# Restored: comment-stripping corrupted the options setup here (only a
# dangling `"] = True` fragment remained). Reconstructed from the
# commented copy of this script earlier in the file.
opts = {}
opts["expand"] = True
opts["ipopt.linear_solver"] = 'ma27'
solver = nlpsol("solver", "ipopt", nlp, opts)
arg = {}
arg["x0"] = vars_init
arg["lbx"] = vars_lb
arg["ubx"] = vars_ub
arg["lbg"] = NP.concatenate(lbg)
arg["ubg"] = NP.concatenate(ubg)
res = solver(**arg)
print("optimal cost: ", float(res["f"]))
opt = V(res["x"])
x0_opt = opt["X",:,0,"x",0]
x1_opt = opt["X",:,0,"x",1]
x2_opt = opt["X",:,0,"L"]
u_opt = opt["U",:,0]
tgrid = NP.linspace(0,tf,nk+1)
tgrid_u = NP.linspace(0,tf,nk)
plt.figure(1)
plt.clf()
plt.plot(tgrid,x0_opt,'--')
plt.plot(tgrid,x1_opt,'-.')
plt.step(tgrid_u,u_opt,'-')
plt.title("Van der Pol optimization")
plt.xlabel('time')
plt.legend(['x[0] trajectory','x[1] trajectory','u trajectory'])
plt.grid()
plt.show()
| true | true |
f73300828bf5ac52017e45fc1c5e8a5c2c0df975 | 3,509 | py | Python | bj-web/format-web-templates.py | pastly/blackjack | df59614cdde6a0151b2b3dffcca92705e5394a3d | [
"MIT"
] | 1 | 2021-07-26T20:44:22.000Z | 2021-07-26T20:44:22.000Z | bj-web/format-web-templates.py | pastly/blackjack-rs | df59614cdde6a0151b2b3dffcca92705e5394a3d | [
"MIT"
] | 4 | 2019-12-06T17:48:39.000Z | 2020-03-12T01:18:01.000Z | bj-web/format-web-templates.py | pastly/blackjack | df59614cdde6a0151b2b3dffcca92705e5394a3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from glob import iglob
import os
import logging
from subprocess import run
# Files with these extensions are copied through verbatim; every other file
# is treated as text and run through template substitution (see main()).
BIN_EXTS = [
    '.wasm',
    '.png',
]
# Lazily-computed "date (shortsha)" cache, populated by get_version_str().
VERSION_STR = None
logging.basicConfig(level=logging.INFO)
def abspath(p):
    """Fully expand *p*: env vars first, then ``~``, then make absolute."""
    expanded = os.path.expandvars(p)
    expanded = os.path.expanduser(expanded)
    return os.path.abspath(expanded)
def find_files(dname):
    """Yield every regular file under *dname*, recursively."""
    for path in iglob(f'{dname}/**', recursive=True):
        if not os.path.isfile(path):
            continue  # skip directories matched by the ** glob
        yield path
def replace_prefix(s, bad_prefix, new_prefix):
    """Swap *bad_prefix* (which *s* must start with) for *new_prefix*."""
    assert s.startswith(bad_prefix)
    suffix = s[len(bad_prefix):]
    return new_prefix + suffix
def is_binary_file(fname):
    """True when *fname*'s extension marks it as binary (see BIN_EXTS)."""
    _, ext = os.path.splitext(fname)
    return ext in BIN_EXTS
def get_version_str():
    """Return a cached "YYYY-MM-DD (shortsha)" string for the current git HEAD.

    The result is memoized in the module-level VERSION_STR so git is only
    invoked once per run.
    """
    global VERSION_STR
    if VERSION_STR is not None:
        return VERSION_STR
    commit = run(
        'git rev-parse --short HEAD'.split(),
        text=True, capture_output=True).stdout.strip()
    date = run(
        'git show -s --format=%cd --date=format:%Y-%m-%d'.split() + [commit],
        text=True, capture_output=True).stdout.strip()
    VERSION_STR = f'{date} ({commit})'
    return VERSION_STR
def get_google_shit():
    # HTML snippet substituted wherever a page contains "<!-- GOOGLE_SHIT -->":
    # the Google Analytics gtag bootstrap plus the AdSense loader script.
    return '''
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-160379782-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-160379782-1');
</script>
<script data-ad-client="ca-pub-3834375319956666" async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
'''
def get_nav_bar():
    # Site navigation substituted wherever a page contains
    # "<!-- BJ_TMPL_NAV_BAR -->".
    return '''
<nav>
<a href='index.html'><img alt="BJ logo" id=logo src="static/logo.png" /></a>
<ul>
<li><a href='index.html'>Game</a></li>
<li><a href='custom-card.html'>Customize</a></li>
</ul>
</nav>
'''
def main(args):
    """Mirror args.input into args.output, expanding template markers.

    Binary files (per is_binary_file) are copied byte-for-byte; text
    files have the BJ_TMPL_* and GOOGLE_SHIT markers substituted.
    Returns None, which exit() maps to status 0.
    """
    for in_fname in find_files(args.input):
        out_fname = replace_prefix(in_fname, args.input, args.output)
        out_dir = os.path.dirname(out_fname)
        logging.debug(f'Making sure {out_dir} exists')
        os.makedirs(out_dir, exist_ok=True)
        binary = is_binary_file(in_fname)
        # Input and output share a basename, hence the same binary-ness.
        read_mode, write_mode = ('rb', 'wb') if binary else ('rt', 'wt')
        with open(in_fname, read_mode) as ifd, open(out_fname, write_mode) as ofd:
            if binary:
                logging.info(f'Considering {in_fname} a binary file')
                ofd.write(ifd.read())
                continue
            logging.info(f'Considering {in_fname} a text file')
            text = ifd.read()
            text = text.replace('<!-- BJ_TMPL_NAV_BAR -->', get_nav_bar())
            text = text.replace('<!-- BJ_TMPL_VERSION -->', get_version_str())
            text = text.replace('<!-- GOOGLE_SHIT -->', get_google_shit())
            ofd.write(text)
if __name__ == '__main__':
    p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    p.add_argument(
        '-i', '--input', type=str, default='www', help='Input directory')
    p.add_argument(
        '-o', '--output', type=str, default='www-out', help='Output directory')
    args = p.parse_args()
    # Normalize both directories to absolute paths up front so prefix
    # replacement in main() is well-defined.
    args.input = abspath(args.input)
    args.output = abspath(args.output)
    assert os.path.isdir(args.input), f'{args.input} does not exist'
    if os.path.isdir(args.output):
        logging.warning(
            f'{args.output} exists. Files inside will be overwritten')
    # exit() forwards main()'s return value (None -> exit status 0).
    exit(main(args))
| 30.513043 | 135 | 0.634369 |
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from glob import iglob
import os
import logging
from subprocess import run
BIN_EXTS = [
'.wasm',
'.png',
]
VERSION_STR = None
logging.basicConfig(level=logging.INFO)
def abspath(p):
op = os.path
return op.abspath(op.expanduser(op.expandvars(p)))
def find_files(dname):
for name in iglob(f'{dname}/**', recursive=True):
if os.path.isfile(name):
yield name
def replace_prefix(s, bad_prefix, new_prefix):
assert s.startswith(bad_prefix)
return new_prefix + s[len(bad_prefix):]
def is_binary_file(fname):
ext = os.path.splitext(fname)[1]
return ext in BIN_EXTS
def get_version_str():
global VERSION_STR
if VERSION_STR is not None:
return VERSION_STR
proc = run(
'git rev-parse --short HEAD'.split(),
text=True, capture_output=True)
commit = proc.stdout.strip()
proc = run(
'git show -s --format=%cd --date=format:%Y-%m-%d'.split() + [commit],
text=True, capture_output=True)
date = proc.stdout.strip()
VERSION_STR = f'{date} ({commit})'
return VERSION_STR
def get_google_shit():
return '''
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-160379782-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-160379782-1');
</script>
<script data-ad-client="ca-pub-3834375319956666" async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
'''
def get_nav_bar():
return '''
<nav>
<a href='index.html'><img alt="BJ logo" id=logo src="static/logo.png" /></a>
<ul>
<li><a href='index.html'>Game</a></li>
<li><a href='custom-card.html'>Customize</a></li>
</ul>
</nav>
'''
def main(args):
for in_fname in find_files(args.input):
out_fname = replace_prefix(in_fname, args.input, args.output)
dirname = os.path.dirname(out_fname)
logging.debug(f'Making sure {dirname} exists')
os.makedirs(dirname, exist_ok=True)
rmode = 'rb' if is_binary_file(in_fname) else 'rt'
wmode = 'wb' if is_binary_file(out_fname) else 'wt'
with open(in_fname, rmode) as ifd, open(out_fname, wmode) as ofd:
if is_binary_file(in_fname):
logging.info(f'Considering {in_fname} a binary file')
ofd.write(ifd.read())
continue
logging.info(f'Considering {in_fname} a text file')
s = ifd.read()
s = s.replace('<!-- BJ_TMPL_NAV_BAR -->', get_nav_bar())
s = s.replace('<!-- BJ_TMPL_VERSION -->', get_version_str())
s = s.replace('<!-- GOOGLE_SHIT -->', get_google_shit())
ofd.write(s)
if __name__ == '__main__':
p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument(
'-i', '--input', type=str, default='www', help='Input directory')
p.add_argument(
'-o', '--output', type=str, default='www-out', help='Output directory')
args = p.parse_args()
args.input = abspath(args.input)
args.output = abspath(args.output)
assert os.path.isdir(args.input), f'{args.input} does not exist'
if os.path.isdir(args.output):
logging.warning(
f'{args.output} exists. Files inside will be overwritten')
exit(main(args))
| true | true |
f733010399f5ba012473da30aa9be866dc57e355 | 7,556 | py | Python | grplot/features/plot/treemaps.py | ghiffaryr/grplot | 43ea08febac4ffecbce0a6a3d679850f5013aa28 | [
"BSD-3-Clause"
] | null | null | null | grplot/features/plot/treemaps.py | ghiffaryr/grplot | 43ea08febac4ffecbce0a6a3d679850f5013aa28 | [
"BSD-3-Clause"
] | null | null | null | grplot/features/plot/treemaps.py | ghiffaryr/grplot | 43ea08febac4ffecbce0a6a3d679850f5013aa28 | [
"BSD-3-Clause"
] | null | null | null | # Squarified Treemap Layout
# Implements algorithm from Bruls, Huizing, van Wijk, "Squarified Treemaps" and Laserson with some modifications
# (but not using their pseudocode)
# INTERNAL FUNCTIONS not meant to be used by the user
def pad_rectangle(rect):
    """Shrink *rect* in place by a 1-unit border on each axis that can afford it.

    An axis shorter than or equal to 2 units is left untouched so the
    rectangle never collapses to zero or negative size.
    """
    for pos, length in (("x", "dx"), ("y", "dy")):
        if rect[length] > 2:
            rect[pos] += 1
            rect[length] -= 2
def layoutrow(sizes, x, y, dx, dy):
    """Stack *sizes* vertically in a column of fixed height *dy*.

    Used when dx >= dy. The column's width is covered_area / dy, and
    each rectangle's height follows from its area. Sizes are assumed
    pre-normalized to the dx * dy area.
    """
    col_width = sum(sizes) / dy
    rects = []
    top = y
    for size in sizes:
        rects.append({"x": x, "y": top, "dx": col_width, "dy": size / col_width})
        top += size / col_width
    return rects
def layoutcol(sizes, x, y, dx, dy):
    """Lay *sizes* side by side in a row of fixed width *dx*.

    Used when dx < dy. The row's height is covered_area / dx, and each
    rectangle's width follows from its area. Sizes are assumed
    pre-normalized to the dx * dy area.
    """
    row_height = sum(sizes) / dx
    rects = []
    left = x
    for size in sizes:
        rects.append({"x": left, "y": y, "dx": size / row_height, "dy": row_height})
        left += size / row_height
    return rects
def layout(sizes, x, y, dx, dy):
    """Lay *sizes* out as a row (wide region) or column (tall region)."""
    if dx >= dy:
        return layoutrow(sizes, x, y, dx, dy)
    return layoutcol(sizes, x, y, dx, dy)
def leftoverrow(sizes, x, y, dx, dy):
    """Free area remaining to the right after layoutrow() (dx >= dy case)."""
    col_width = sum(sizes) / dy
    return (x + col_width, y, dx - col_width, dy)
def leftovercol(sizes, x, y, dx, dy):
    """Free area remaining below after layoutcol() (dx < dy case)."""
    row_height = sum(sizes) / dx
    return (x, y + row_height, dx, dy - row_height)
def leftover(sizes, x, y, dx, dy):
    """Free area left after layout(); row/column choice matches layout()."""
    if dx >= dy:
        return leftoverrow(sizes, x, y, dx, dy)
    return leftovercol(sizes, x, y, dx, dy)
def worst_ratio(sizes, x, y, dx, dy):
    """Return the highest aspect ratio among the rects layout() would emit."""
    ratios = []
    for rect in layout(sizes, x, y, dx, dy):
        w, h = rect["dx"], rect["dy"]
        ratios.append(max(w / h, h / w))
    return max(ratios)
# PUBLIC API
def squarify(sizes, x, y, dx, dy):
    """Compute treemap rectangles for *sizes* inside the given geometry.

    Implements the split search from Bruls, Huizing and van Wijk,
    "Squarified Treemaps": keep adding values to the current strip while
    doing so does not worsen the worst aspect ratio, then recurse into
    the leftover area.

    Parameters
    ----------
    sizes : list-like of numeric values
        Positive values sorted in descending order, pre-normalized so
        that ``sum(sizes) == dx * dy``.
    x, y : numeric
        Origin of the rectangle being filled.
    dx, dy : numeric
        Width and height of the rectangle being filled.

    Returns
    -------
    list[dict]
        One ``{"x", "y", "dx", "dy"}`` dict per input value, in input
        order.
    """
    sizes = [float(size) for size in sizes]
    if not sizes:
        return []
    if len(sizes) == 1:
        return layout(sizes, x, y, dx, dy)

    # Grow the first strip while the worst aspect ratio does not degrade.
    split = 1
    while split < len(sizes) and worst_ratio(
            sizes[:split], x, y, dx, dy) >= worst_ratio(
            sizes[:split + 1], x, y, dx, dy):
        split += 1
    strip, rest = sizes[:split], sizes[split:]

    # Lay out the strip, then recurse into the remaining free area.
    (rx, ry, rdx, rdy) = leftover(strip, x, y, dx, dy)
    return layout(strip, x, y, dx, dy) + squarify(rest, rx, ry, rdx, rdy)
def padded_squarify(sizes, x, y, dx, dy):
    """Compute padded treemap rectangles.

    Identical to `squarify` except that each returned rectangle is shrunk
    in place by `pad_rectangle`, leaving a visible border between
    neighbours.
    """
    padded = squarify(sizes, x, y, dx, dy)
    for item in padded:
        pad_rectangle(item)
    return padded
def normalize_sizes(sizes, dx, dy):
    """Rescale values so that their sum equals the total area ``dx * dy``.

    Parameters
    ----------
    sizes : list-like of numeric values
        Input list of numeric values to normalize.
    dx, dy : numeric
        The dimensions of the full rectangle to normalize total values to.

    Returns
    -------
    list[numeric]
        The normalized values, in input order.
    """
    total = sum(sizes)
    area = dx * dy
    # Keep the original evaluation order (value * area / total) so results
    # are bit-for-bit identical in floating point.
    return [float(value) * area / total for value in sizes]
def plot(
    sizes,
    norm_x=100,
    norm_y=100,
    color=None,
    label=None,
    value=None,
    ax=None,
    pad=False,
    bar_kwargs=None,
    text_kwargs=None,
    **kwargs
):
    """Plot a treemap with Matplotlib.

    Parameters
    ----------
    sizes
        input for squarify
    norm_x, norm_y
        x and y values for normalization
    color
        color string or list-like (see Matplotlib documentation for details)
    label
        list-like used as label text
    value
        list-like used as value text (in most cases identical with sizes argument)
    ax
        Matplotlib Axes instance
    pad
        draw rectangles with a small gap between them
    bar_kwargs : dict
        keyword arguments passed to matplotlib.Axes.bar
    text_kwargs : dict
        keyword arguments passed to matplotlib.Axes.text
    **kwargs
        Any additional kwargs are merged into `bar_kwargs`. Explicitly provided
        kwargs here will take precedence.

    Returns
    -------
    matplotlib.axes.Axes
        Matplotlib Axes
    """
    import matplotlib.pyplot as plt
    from math import ceil

    if ax is None:
        ax = plt.gca()

    if color is None:
        color_cycle = plt.rcParams["axes.prop_cycle"].by_key()["color"]
        if len(sizes) > len(color_cycle):
            # Bug fix: repeat the cycle ceil(len(sizes) / len(color_cycle))
            # times so it covers every rectangle. The previous multiplier,
            # ceil(len(color_cycle) / len(sizes)), evaluates to 1 exactly
            # when there are more sizes than colors, so the cycle was never
            # actually extended and ax.bar received too few colors.
            color_cycle = color_cycle * ceil(len(sizes) / len(color_cycle))
        color = color_cycle[: len(sizes)]

    if bar_kwargs is None:
        bar_kwargs = {}
    if text_kwargs is None:
        text_kwargs = {}
    if len(kwargs) > 0:
        # Extra keyword arguments act as bar styling; explicit kwargs win.
        bar_kwargs.update(kwargs)

    normed = normalize_sizes(sizes, norm_x, norm_y)

    if pad:
        rects = padded_squarify(normed, 0, 0, norm_x, norm_y)
    else:
        rects = squarify(normed, 0, 0, norm_x, norm_y)

    x = [rect["x"] for rect in rects]
    y = [rect["y"] for rect in rects]
    dx = [rect["dx"] for rect in rects]
    dy = [rect["dy"] for rect in rects]

    # One bar per rectangle; align="edge" anchors each bar at its (x, y)
    # corner instead of centering it.
    ax.bar(
        x, dy, width=dx, bottom=y, color=color, label=label, align="edge", **bar_kwargs
    )

    if value is not None:
        va = "center" if label is None else "top"
        for v, r in zip(value, rects):
            x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
            ax.text(x + dx / 2, y + dy / 2, v, va=va, ha="center", **text_kwargs)

    if label is not None:
        va = "center" if value is None else "bottom"
        for l, r in zip(label, rects):
            x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
            ax.text(x + dx / 2, y + dy / 2, l, va=va, ha="center", **text_kwargs)

    ax.set_xlim(0, norm_x)
    ax.set_ylim(0, norm_y)
    ax.axis("off")
    return ax
def pad_rectangle(rect):
if rect["dx"] > 2:
rect["x"] += 1
rect["dx"] -= 2
if rect["dy"] > 2:
rect["y"] += 1
rect["dy"] -= 2
def layoutrow(sizes, x, y, dx, dy):
covered_area = sum(sizes)
width = covered_area / dy
rects = []
for size in sizes:
rects.append({"x": x, "y": y, "dx": width, "dy": size / width})
y += size / width
return rects
def layoutcol(sizes, x, y, dx, dy):
covered_area = sum(sizes)
height = covered_area / dx
rects = []
for size in sizes:
rects.append({"x": x, "y": y, "dx": size / height, "dy": height})
x += size / height
return rects
def layout(sizes, x, y, dx, dy):
return (
layoutrow(sizes, x, y, dx, dy) if dx >= dy else layoutcol(sizes, x, y, dx, dy)
)
def leftoverrow(sizes, x, y, dx, dy):
covered_area = sum(sizes)
width = covered_area / dy
leftover_x = x + width
leftover_y = y
leftover_dx = dx - width
leftover_dy = dy
return (leftover_x, leftover_y, leftover_dx, leftover_dy)
def leftovercol(sizes, x, y, dx, dy):
covered_area = sum(sizes)
height = covered_area / dx
leftover_x = x
leftover_y = y + height
leftover_dx = dx
leftover_dy = dy - height
return (leftover_x, leftover_y, leftover_dx, leftover_dy)
def leftover(sizes, x, y, dx, dy):
return (
leftoverrow(sizes, x, y, dx, dy)
if dx >= dy
else leftovercol(sizes, x, y, dx, dy)
)
def worst_ratio(sizes, x, y, dx, dy):
return max(
[
max(rect["dx"] / rect["dy"], rect["dy"] / rect["dx"])
for rect in layout(sizes, x, y, dx, dy)
]
)
def squarify(sizes, x, y, dx, dy):
sizes = list(map(float, sizes))
if len(sizes) == 0:
return []
if len(sizes) == 1:
return layout(sizes, x, y, dx, dy)
i = 1
while i < len(sizes) and worst_ratio(sizes[:i], x, y, dx, dy) >= worst_ratio(
sizes[: (i + 1)], x, y, dx, dy
):
i += 1
current = sizes[:i]
remaining = sizes[i:]
(leftover_x, leftover_y, leftover_dx, leftover_dy) = leftover(current, x, y, dx, dy)
return layout(current, x, y, dx, dy) + squarify(
remaining, leftover_x, leftover_y, leftover_dx, leftover_dy
)
def padded_squarify(sizes, x, y, dx, dy):
rects = squarify(sizes, x, y, dx, dy)
for rect in rects:
pad_rectangle(rect)
return rects
def normalize_sizes(sizes, dx, dy):
total_size = sum(sizes)
total_area = dx * dy
sizes = map(float, sizes)
sizes = map(lambda size: size * total_area / total_size, sizes)
return list(sizes)
def plot(
sizes,
norm_x=100,
norm_y=100,
color=None,
label=None,
value=None,
ax=None,
pad=False,
bar_kwargs=None,
text_kwargs=None,
**kwargs
):
import matplotlib.pyplot as plt
from math import ceil
if ax is None:
ax = plt.gca()
if color is None:
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
if len(sizes) > len(color_cycle):
color_cycle = color_cycle * ceil(len(color_cycle)/len(sizes))
color = color_cycle[:len(sizes)]
if bar_kwargs is None:
bar_kwargs = {}
if text_kwargs is None:
text_kwargs = {}
if len(kwargs) > 0:
bar_kwargs.update(kwargs)
normed = normalize_sizes(sizes, norm_x, norm_y)
if pad:
rects = padded_squarify(normed, 0, 0, norm_x, norm_y)
else:
rects = squarify(normed, 0, 0, norm_x, norm_y)
x = [rect["x"] for rect in rects]
y = [rect["y"] for rect in rects]
dx = [rect["dx"] for rect in rects]
dy = [rect["dy"] for rect in rects]
ax.bar(
x, dy, width=dx, bottom=y, color=color, label=label, align="edge", **bar_kwargs
)
if value is not None:
va = "center" if label is None else "top"
for v, r in zip(value, rects):
x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
ax.text(x + dx / 2, y + dy / 2, v, va=va, ha="center", **text_kwargs)
if label is not None:
va = "center" if value is None else "bottom"
for l, r in zip(label, rects):
x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
ax.text(x + dx / 2, y + dy / 2, l, va=va, ha="center", **text_kwargs)
ax.set_xlim(0, norm_x)
ax.set_ylim(0, norm_y)
ax.axis('off')
return ax | true | true |
f733020541f06e586fbfed8118f91663bf6bdd3c | 4,814 | py | Python | tests/unittest/test_sampler.py | bkktimber/gluon-nlp | 205acce13a83b30eabd7a638e4773e7a4f91059a | [
"Apache-2.0"
] | null | null | null | tests/unittest/test_sampler.py | bkktimber/gluon-nlp | 205acce13a83b30eabd7a638e4773e7a4f91059a | [
"Apache-2.0"
] | null | null | null | tests/unittest/test_sampler.py | bkktimber/gluon-nlp | 205acce13a83b30eabd7a638e4773e7a4f91059a | [
"Apache-2.0"
] | 1 | 2018-09-18T08:39:00.000Z | 2018-09-18T08:39:00.000Z | import pytest
import numpy as np
from mxnet.gluon import data
import gluonnlp as nlp
from gluonnlp.data import sampler as s
N = 1000
def test_sorted_sampler():
dataset = data.SimpleDataset([np.random.normal(0, 1, (np.random.randint(10, 100), 1, 1))
for _ in range(N)])
gt_sample_id = sorted(range(len(dataset)), key=lambda i: dataset[i].shape, reverse=True)
sample_ret = list(s.SortedSampler([ele.shape[0] for ele in dataset]))
for lhs, rhs in zip(gt_sample_id, sample_ret):
assert lhs == rhs
@pytest.mark.parametrize('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],
[(np.random.randint(10, 100), np.random.randint(10, 100))
for _ in range(N)]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('num_buckets', [1, 10, 100, 5000])
@pytest.mark.parametrize('bucket_scheme', [s.ConstWidthBucket(),
s.LinearWidthBucket(),
s.ExpWidthBucket()])
@pytest.mark.parametrize('use_average_length', [False, True])
@pytest.mark.parametrize('num_shards', range(4))
def test_fixed_bucket_sampler(seq_lengths, ratio, shuffle, num_buckets, bucket_scheme,
use_average_length, num_shards):
sampler = s.FixedBucketSampler(seq_lengths,
batch_size=8,
num_buckets=num_buckets,
ratio=ratio, shuffle=shuffle,
use_average_length=use_average_length,
bucket_scheme=bucket_scheme,
num_shards=num_shards)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
if num_shards > 0:
assert len(batch_sample_ids) == num_shards
else:
total_sampled_ids.extend(batch_sample_ids)
if num_shards == 0:
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
@pytest.mark.parametrize('bucket_keys', [[1, 5, 10, 100], [10, 100], [200]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
def test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):
seq_lengths = [np.random.randint(10, 100) for _ in range(N)]
sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,
bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
@pytest.mark.parametrize('bucket_keys', [[(1, 1), (5, 10), (10, 20), (20, 10), (100, 100)],
[(20, 20), (30, 15), (100, 100)],
[(100, 200)]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
def test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):
seq_lengths = [(np.random.randint(10, 100), np.random.randint(10, 100)) for _ in range(N)]
sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,
bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
def test_fixed_bucket_sampler_compactness():
samples = list(
s.FixedBucketSampler(
np.arange(16, 32), 8, num_buckets=2,
bucket_scheme=nlp.data.ConstWidthBucket()))
assert len(samples) == 2
@pytest.mark.parametrize('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],
[(np.random.randint(10, 100), np.random.randint(10, 100))
for _ in range(N)]])
@pytest.mark.parametrize('mult', [10, 100])
@pytest.mark.parametrize('batch_size', [5, 7])
@pytest.mark.parametrize('shuffle', [False, True])
def test_sorted_bucket_sampler(seq_lengths, mult, batch_size, shuffle):
sampler = s.SortedBucketSampler(sort_keys=seq_lengths,
batch_size=batch_size,
mult=mult, shuffle=shuffle)
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
| 49.122449 | 98 | 0.606356 | import pytest
import numpy as np
from mxnet.gluon import data
import gluonnlp as nlp
from gluonnlp.data import sampler as s
N = 1000
def test_sorted_sampler():
dataset = data.SimpleDataset([np.random.normal(0, 1, (np.random.randint(10, 100), 1, 1))
for _ in range(N)])
gt_sample_id = sorted(range(len(dataset)), key=lambda i: dataset[i].shape, reverse=True)
sample_ret = list(s.SortedSampler([ele.shape[0] for ele in dataset]))
for lhs, rhs in zip(gt_sample_id, sample_ret):
assert lhs == rhs
@pytest.mark.parametrize('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],
[(np.random.randint(10, 100), np.random.randint(10, 100))
for _ in range(N)]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('num_buckets', [1, 10, 100, 5000])
@pytest.mark.parametrize('bucket_scheme', [s.ConstWidthBucket(),
s.LinearWidthBucket(),
s.ExpWidthBucket()])
@pytest.mark.parametrize('use_average_length', [False, True])
@pytest.mark.parametrize('num_shards', range(4))
def test_fixed_bucket_sampler(seq_lengths, ratio, shuffle, num_buckets, bucket_scheme,
use_average_length, num_shards):
sampler = s.FixedBucketSampler(seq_lengths,
batch_size=8,
num_buckets=num_buckets,
ratio=ratio, shuffle=shuffle,
use_average_length=use_average_length,
bucket_scheme=bucket_scheme,
num_shards=num_shards)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
if num_shards > 0:
assert len(batch_sample_ids) == num_shards
else:
total_sampled_ids.extend(batch_sample_ids)
if num_shards == 0:
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
@pytest.mark.parametrize('bucket_keys', [[1, 5, 10, 100], [10, 100], [200]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
def test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):
seq_lengths = [np.random.randint(10, 100) for _ in range(N)]
sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,
bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
@pytest.mark.parametrize('bucket_keys', [[(1, 1), (5, 10), (10, 20), (20, 10), (100, 100)],
[(20, 20), (30, 15), (100, 100)],
[(100, 200)]])
@pytest.mark.parametrize('ratio', [0.0, 0.5])
@pytest.mark.parametrize('shuffle', [False, True])
def test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):
seq_lengths = [(np.random.randint(10, 100), np.random.randint(10, 100)) for _ in range(N)]
sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,
bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)
print(sampler.stats())
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
def test_fixed_bucket_sampler_compactness():
samples = list(
s.FixedBucketSampler(
np.arange(16, 32), 8, num_buckets=2,
bucket_scheme=nlp.data.ConstWidthBucket()))
assert len(samples) == 2
@pytest.mark.parametrize('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],
[(np.random.randint(10, 100), np.random.randint(10, 100))
for _ in range(N)]])
@pytest.mark.parametrize('mult', [10, 100])
@pytest.mark.parametrize('batch_size', [5, 7])
@pytest.mark.parametrize('shuffle', [False, True])
def test_sorted_bucket_sampler(seq_lengths, mult, batch_size, shuffle):
sampler = s.SortedBucketSampler(sort_keys=seq_lengths,
batch_size=batch_size,
mult=mult, shuffle=shuffle)
total_sampled_ids = []
for batch_sample_ids in sampler:
total_sampled_ids.extend(batch_sample_ids)
assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N
| true | true |
f73302dea282cd2c8d0f519eeb2540fda43085a2 | 3,677 | py | Python | prssm/benchmarks/run_real_world_tasks/run_large_scale_experiment.py | boschresearch/PR-SSM | 87d24888373b43575924a4e117b496908830d1a9 | [
"MIT",
"BSD-3-Clause"
] | 38 | 2018-07-12T09:48:00.000Z | 2022-03-28T10:31:42.000Z | prssm/benchmarks/run_real_world_tasks/run_large_scale_experiment.py | andreasdoerr/PR-SSM | 87d24888373b43575924a4e117b496908830d1a9 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-11-13T11:48:12.000Z | 2019-12-15T12:50:57.000Z | prssm/benchmarks/run_real_world_tasks/run_large_scale_experiment.py | boschresearch/PR-SSM | 87d24888373b43575924a4e117b496908830d1a9 | [
"MIT",
"BSD-3-Clause"
] | 19 | 2018-09-19T15:26:44.000Z | 2021-12-02T11:31:45.000Z | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Robert Bosch GmbH
All rights reserved.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@author: Andreas Doerr
"""
import os
from prssm.benchmarks.run import run
from prssm.tasks.real_world_tasks import SarcosArm
from prssm.benchmarks.outputs import VisualOutput
from prssm.benchmarks.outputs import PrintRMSE
from prssm.benchmarks.outputs import SaveRMSE
from prssm.benchmarks.outputs import PrintLogLik
from prssm.benchmarks.outputs import SaveLogLik
from prssm.benchmarks.outputs import SaveStartScript
from prssm.benchmarks.outputs import SavePredictionResults
from prssm.models.prssm import PRSSM
from prssm.utils.utils import create_dated_directory
# Create a directory for experimental results
outpath = create_dated_directory('results/sarcos')
prjname = 'PRSSM'
# Configuration of PR-SSM model and inference
Dx = 14
Dy = 7
Du = 7
# System input: torques, System output: positions
# Select first 60 experiments for training, last 6 for testing
task_config = {
'input_ind': list(range(21, 28)),
'output_ind': list(range(0, 7)),
'downsample': 2,
'train_ind': list(range(0, 60)),
'test_ind': list(range(60, 66))
}
config_inducing_inputs = {
'method': 'uniform',
'low': -3,
'high': 3
}
config_inducing_outputs = {
'method': 'zero',
'noise': 0.1**2, #
'var': 0.01**2,
}
PRSSM_config = {
'x0_noise': 0.001**2, # noise variance on initial state
'variance': 0.5**2,
'lengthscales': [2]*(Dx + Du), # kernel lengthscales
'config_inducing_inputs': config_inducing_inputs,
'config_inducing_outputs': config_inducing_outputs,
'x0_init_method': 'output',
'Dx': Dx,
'N_inducing': 50,
'N_samples': 20,
'maxiter': 1000,
'learning_rate': 0.05,
'result_dir': outpath,
'normalize_data': True,
'N_batch': 5,
'H_batch': 100,
'output_epoch': 50,
'var_x_np': [0.002**2]*Dx,
'process_noise': False,
'var_y_np': [0.05**2]*Dy, # observation noise variance
'optimize_observation_noise': False,
'plot_output_ind': [0],
'plot_legends': False
}
# Configuration of large scale experiment
config = {
'methods': [PRSSM],
'method_configs': [PRSSM_config],
'tasks': [SarcosArm],
'task_configs': [task_config],
'repeats': 1,
'outpath': outpath,
'prjname': prjname,
'outputs': [VisualOutput(outpath, prjname),
PrintRMSE(outpath, prjname),
SaveRMSE(outpath, prjname),
PrintLogLik(outpath, prjname),
SaveLogLik(outpath, prjname),
SaveStartScript(outpath, prjname, os.path.realpath(__file__)),
SavePredictionResults(outpath, prjname)],
'intermediate_results': True,
'raise_exception': True,
'validate': True
}
if __name__ == '__main__':
run(config)
| 33.427273 | 84 | 0.538754 |
import os
from prssm.benchmarks.run import run
from prssm.tasks.real_world_tasks import SarcosArm
from prssm.benchmarks.outputs import VisualOutput
from prssm.benchmarks.outputs import PrintRMSE
from prssm.benchmarks.outputs import SaveRMSE
from prssm.benchmarks.outputs import PrintLogLik
from prssm.benchmarks.outputs import SaveLogLik
from prssm.benchmarks.outputs import SaveStartScript
from prssm.benchmarks.outputs import SavePredictionResults
from prssm.models.prssm import PRSSM
from prssm.utils.utils import create_dated_directory
outpath = create_dated_directory('results/sarcos')
prjname = 'PRSSM'
Dx = 14
Dy = 7
Du = 7
task_config = {
'input_ind': list(range(21, 28)),
'output_ind': list(range(0, 7)),
'downsample': 2,
'train_ind': list(range(0, 60)),
'test_ind': list(range(60, 66))
}
config_inducing_inputs = {
'method': 'uniform',
'low': -3,
'high': 3
}
config_inducing_outputs = {
'method': 'zero',
'noise': 0.1**2,
'var': 0.01**2,
}
PRSSM_config = {
'x0_noise': 0.001**2,
'variance': 0.5**2,
'lengthscales': [2]*(Dx + Du),
'config_inducing_inputs': config_inducing_inputs,
'config_inducing_outputs': config_inducing_outputs,
'x0_init_method': 'output',
'Dx': Dx,
'N_inducing': 50,
'N_samples': 20,
'maxiter': 1000,
'learning_rate': 0.05,
'result_dir': outpath,
'normalize_data': True,
'N_batch': 5,
'H_batch': 100,
'output_epoch': 50,
'var_x_np': [0.002**2]*Dx,
'process_noise': False,
'var_y_np': [0.05**2]*Dy,
'optimize_observation_noise': False,
'plot_output_ind': [0],
'plot_legends': False
}
config = {
'methods': [PRSSM],
'method_configs': [PRSSM_config],
'tasks': [SarcosArm],
'task_configs': [task_config],
'repeats': 1,
'outpath': outpath,
'prjname': prjname,
'outputs': [VisualOutput(outpath, prjname),
PrintRMSE(outpath, prjname),
SaveRMSE(outpath, prjname),
PrintLogLik(outpath, prjname),
SaveLogLik(outpath, prjname),
SaveStartScript(outpath, prjname, os.path.realpath(__file__)),
SavePredictionResults(outpath, prjname)],
'intermediate_results': True,
'raise_exception': True,
'validate': True
}
if __name__ == '__main__':
run(config)
| true | true |
f73303c8e3b3a96b3d69dd83cfb99a5abe8a2a72 | 721 | py | Python | var/spack/repos/builtin/packages/inputproto/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/inputproto/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/inputproto/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Inputproto(AutotoolsPackage, XorgPackage):
"""X Input Extension.
This extension defines a protocol to provide additional input devices
management such as graphic tablets."""
homepage = "https://cgit.freedesktop.org/xorg/proto/inputproto"
xorg_mirror_path = "proto/inputproto-2.3.2.tar.gz"
version('2.3.2', sha256='10eaadd531f38f7c92ab59ef0708ca195caf3164a75c4ed99f0c04f2913f6ef3')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 32.772727 | 95 | 0.751734 |
from spack import *
class Inputproto(AutotoolsPackage, XorgPackage):
homepage = "https://cgit.freedesktop.org/xorg/proto/inputproto"
xorg_mirror_path = "proto/inputproto-2.3.2.tar.gz"
version('2.3.2', sha256='10eaadd531f38f7c92ab59ef0708ca195caf3164a75c4ed99f0c04f2913f6ef3')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| true | true |
f733056fff62a3323147362fe522314a66fa7c85 | 4,578 | py | Python | flask_sketch/handlers/api_framework_handler.py | ericsouza/flask-sketch | 65625a567e5492b3787c5da3ba5e12b1473783c4 | [
"MIT"
] | 11 | 2020-08-19T14:33:00.000Z | 2021-05-29T17:12:47.000Z | flask_sketch/handlers/api_framework_handler.py | ericsouza/flask-sketch | 65625a567e5492b3787c5da3ba5e12b1473783c4 | [
"MIT"
] | 3 | 2021-03-16T19:33:39.000Z | 2021-03-18T18:53:55.000Z | flask_sketch/handlers/api_framework_handler.py | ericsouza/flask-sketch | 65625a567e5492b3787c5da3ba5e12b1473783c4 | [
"MIT"
] | 2 | 2020-08-19T14:33:11.000Z | 2020-09-16T19:34:16.000Z | import os
from os.path import join as pjoin
from flask_sketch import templates
from flask_sketch.sketch import Sketch
from flask_sketch.const import requirements as reqs
from flask_sketch.utils import GenericHandler
def restx_handler(sketch: Sketch):
if sketch.api_framework == "restx":
sketch.add_requirements(reqs.FLASK_RESTX)
os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
open(
pjoin(
sketch.app_folder,
"api",
"resources",
"examples",
"__init__.py",
),
"a",
).close()
if sketch.api_auth_framework == "jwt_extended":
sketch.write_template(
"api_init_restx_jwtext_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
else:
sketch.write_template(
"api_init_restx_noauth_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
if sketch.api_auth_framework == "none":
resource_tpl = "api_examples_restx_pet_tpl"
else:
resource_tpl = "api_examples_restx_pet_auth_tpl"
sketch.write_template(
resource_tpl,
templates.api.resources.examples,
pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
)
if sketch.database == "mongodb":
example_tpl_model = "pet_mongo_tpl"
else:
example_tpl_model = "pet_sql_tpl"
sketch.write_template(
example_tpl_model,
templates.models.examples,
pjoin(sketch.app_folder, "models", "examples", "pet.py"),
)
return True
def smorest_handler(sketch: Sketch):
if sketch.api_framework == "smorest":
sketch.add_requirements(reqs.FLASK_SMOREST)
sketch.settings["default"]["API_TITLE"] = sketch.project_name
sketch.settings["default"]["API_VERSION"] = "v1"
sketch.settings["default"]["OPENAPI_VERSION"] = "3.0.2"
sketch.settings["default"]["OPENAPI_JSON_PATH"] = "api-spec.json"
sketch.settings["default"]["OPENAPI_URL_PREFIX"] = "/openapi"
sketch.settings["default"]["OPENAPI_REDOC_PATH"] = "/redoc"
sketch.settings["default"][
"OPENAPI_REDOC_URL"
] = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js" # noqa
sketch.settings["default"]["OPENAPI_SWAGGER_UI_PATH"] = "/swagger-ui"
sketch.settings["default"][
"OPENAPI_SWAGGER_UI_URL"
] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist/"
sketch.add_extensions("api")
os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
open(
pjoin(
sketch.app_folder,
"api",
"resources",
"examples",
"__init__.py",
),
"a",
).close()
if sketch.api_auth_framework == "jwt_extended":
sketch.write_template(
"api_init_jwt_extended_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
sketch.write_template(
"ext_api_smorest_tpl",
templates.ext,
pjoin(sketch.app_folder, "ext", "api.py"),
)
if sketch.api_auth_framework == "none":
resource_tpl = "api_example_smorest_pet_tpl"
else:
resource_tpl = "api_example_smorest_pet_auth_tpl"
sketch.write_template(
resource_tpl,
templates.api.resources.examples,
pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
)
if sketch.database == "mongodb":
example_tpl_model = "pet_mongo_tpl"
else:
example_tpl_model = "pet_sql_tpl"
sketch.write_template(
example_tpl_model,
templates.models.examples,
pjoin(sketch.app_folder, "models", "examples", "pet.py"),
)
return True
def restful_handler(sketch: Sketch):
if sketch.api_framework == "restful":
return True
def none_handler(sketch: Sketch):
if sketch.api_framework == "none":
return True
class ApiFrameworkHandler(GenericHandler):
...
api_framework_handler = ApiFrameworkHandler(
restx_handler, smorest_handler, restful_handler, none_handler,
)
| 30.52 | 89 | 0.576889 | import os
from os.path import join as pjoin
from flask_sketch import templates
from flask_sketch.sketch import Sketch
from flask_sketch.const import requirements as reqs
from flask_sketch.utils import GenericHandler
def restx_handler(sketch: Sketch):
if sketch.api_framework == "restx":
sketch.add_requirements(reqs.FLASK_RESTX)
os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
open(
pjoin(
sketch.app_folder,
"api",
"resources",
"examples",
"__init__.py",
),
"a",
).close()
if sketch.api_auth_framework == "jwt_extended":
sketch.write_template(
"api_init_restx_jwtext_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
else:
sketch.write_template(
"api_init_restx_noauth_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
if sketch.api_auth_framework == "none":
resource_tpl = "api_examples_restx_pet_tpl"
else:
resource_tpl = "api_examples_restx_pet_auth_tpl"
sketch.write_template(
resource_tpl,
templates.api.resources.examples,
pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
)
if sketch.database == "mongodb":
example_tpl_model = "pet_mongo_tpl"
else:
example_tpl_model = "pet_sql_tpl"
sketch.write_template(
example_tpl_model,
templates.models.examples,
pjoin(sketch.app_folder, "models", "examples", "pet.py"),
)
return True
def smorest_handler(sketch: Sketch):
if sketch.api_framework == "smorest":
sketch.add_requirements(reqs.FLASK_SMOREST)
sketch.settings["default"]["API_TITLE"] = sketch.project_name
sketch.settings["default"]["API_VERSION"] = "v1"
sketch.settings["default"]["OPENAPI_VERSION"] = "3.0.2"
sketch.settings["default"]["OPENAPI_JSON_PATH"] = "api-spec.json"
sketch.settings["default"]["OPENAPI_URL_PREFIX"] = "/openapi"
sketch.settings["default"]["OPENAPI_REDOC_PATH"] = "/redoc"
sketch.settings["default"][
"OPENAPI_REDOC_URL"
] = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"
sketch.settings["default"]["OPENAPI_SWAGGER_UI_PATH"] = "/swagger-ui"
sketch.settings["default"][
"OPENAPI_SWAGGER_UI_URL"
] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist/"
sketch.add_extensions("api")
os.makedirs(pjoin(sketch.app_folder, "api", "resources", "examples"))
open(
pjoin(
sketch.app_folder,
"api",
"resources",
"examples",
"__init__.py",
),
"a",
).close()
if sketch.api_auth_framework == "jwt_extended":
sketch.write_template(
"api_init_jwt_extended_tpl",
templates.api,
pjoin(sketch.app_folder, "api", "__init__.py"),
)
sketch.write_template(
"ext_api_smorest_tpl",
templates.ext,
pjoin(sketch.app_folder, "ext", "api.py"),
)
if sketch.api_auth_framework == "none":
resource_tpl = "api_example_smorest_pet_tpl"
else:
resource_tpl = "api_example_smorest_pet_auth_tpl"
sketch.write_template(
resource_tpl,
templates.api.resources.examples,
pjoin(sketch.app_folder, "api", "resources", "examples", "pet.py"),
)
if sketch.database == "mongodb":
example_tpl_model = "pet_mongo_tpl"
else:
example_tpl_model = "pet_sql_tpl"
sketch.write_template(
example_tpl_model,
templates.models.examples,
pjoin(sketch.app_folder, "models", "examples", "pet.py"),
)
return True
def restful_handler(sketch: Sketch):
if sketch.api_framework == "restful":
return True
def none_handler(sketch: Sketch):
if sketch.api_framework == "none":
return True
class ApiFrameworkHandler(GenericHandler):
...
api_framework_handler = ApiFrameworkHandler(
restx_handler, smorest_handler, restful_handler, none_handler,
)
| true | true |
f7330613c773e3513188aafb37bf605465fc7e38 | 428 | py | Python | scheduler.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null | scheduler.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null | scheduler.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 16:31:57 2021
@author: beccamayers
"""
import schedule
from datetime import datetime
from alert_bot import get_alert
import time
now = datetime.now()
timestamp = now.strftime("%b%d%Y %H%M%p")
def job():
print("Launching Alert Bot app...")
get_alert()
schedule.every().hour.do(job)
while True:
schedule.run_pending()
time.sleep(1) | 16.461538 | 42 | 0.633178 |
import schedule
from datetime import datetime
from alert_bot import get_alert
import time
now = datetime.now()
timestamp = now.strftime("%b%d%Y %H%M%p")
def job():
print("Launching Alert Bot app...")
get_alert()
schedule.every().hour.do(job)
while True:
schedule.run_pending()
time.sleep(1) | true | true |
f733062558b556f138fa0ab74640c75b263babd6 | 4,139 | py | Python | deep_qa-master/deep_qa/contrib/models/tree_lstm_model.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | 1 | 2017-04-11T13:03:55.000Z | 2017-04-11T13:03:55.000Z | deep_qa-master/deep_qa/contrib/models/tree_lstm_model.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | deep_qa-master/deep_qa/contrib/models/tree_lstm_model.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict
from overrides import overrides
from keras.layers import Input, Dense, Dropout, merge
from keras.regularizers import l2
from ...data.instances.logical_form_instance import LogicalFormInstance
from ..layers.tree_composition_lstm import TreeCompositionLSTM
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class TreeLSTMModel(TextTrainer):
    """
    This is a model very similar to the TrueFalseModel, in that it simply tries to decide whether
    a statement is true or false. The difference is that this uses a TreeLSTM to encode logical
    forms, instead of a simple sentence encoder. This requires logical forms as input, instead of
    regular word sequences.

    We haven't worked on this in a while, so I would not be surprised if it doesn't actually run at
    the moment.
    """
    def __init__(self, params: Dict[str, Any]):
        super(TreeLSTMModel, self).__init__(params)
        # Longest transition sequence seen in the data; set during training
        # via _set_max_lengths().
        self.max_transition_length = None

    def _instance_type(self):
        """Instances for this model are logical forms, not plain sentences."""
        return LogicalFormInstance

    @overrides
    def _build_model(self):
        '''
        train_input: List of two numpy arrays: transitions and initial buffer
        '''
        # Step 1: Initialize the transition inputs.
        # Length of transitions (ops) is an upper limit on the stack and buffer sizes. So we'll use
        # that to initialize the stack and buffer in the LSTM.
        buffer_ops_limit = self.num_sentence_words
        stack_limit = buffer_ops_limit
        # The transitions input has an extra trailing dimension to make the concatenation with the
        # buffer embedding easier.
        transitions_input = Input(shape=(buffer_ops_limit, 1))
        # Step 2: Convert the logical form input to word vectors.
        logical_form_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="sentence_input")
        logical_form_embeddings = self._embed_input(logical_form_input)
        # Step 3: Merge transitions and buffer.
        # NOTE(review): keras.layers.merge is the old Keras 1.x functional API --
        # confirm the pinned Keras version before upgrading this file.
        lstm_input = merge([transitions_input, logical_form_embeddings], mode='concat')
        # Step 4: Pass the sequences of word vectors through TreeLSTM.
        # All four weight matrices get the same L2 penalty.
        lstm_layer = TreeCompositionLSTM(stack_limit=stack_limit, buffer_ops_limit=buffer_ops_limit,
                                         units=self.embedding_dim['words'],
                                         W_regularizer=l2(0.01), U_regularizer=l2(0.01),
                                         V_regularizer=l2(0.01), b_regularizer=l2(0.01),
                                         name='treelstm')
        lstm_out = lstm_layer(lstm_input)
        regularized_lstm_out = Dropout(0.2)(lstm_out)
        # Step 5: Find p(true | proposition) by passing the encoded proposition through MLP with
        # ReLU followed by softmax.
        projection_layer = Dense(self.embedding_dim['words']/2, activation='relu', name='projector')
        softmax_layer = Dense(2, activation='softmax', name='softmax')
        output_probabilities = softmax_layer(projection_layer(regularized_lstm_out))
        # Step 6: Define crossentropy against labels as the loss and compile the model.
        return DeepQaModel(input=[transitions_input, logical_form_input], output=output_probabilities)

    @overrides
    def _get_max_lengths(self) -> Dict[str, int]:
        """Add this model's transition length to the base class's max lengths."""
        max_lengths = super(TreeLSTMModel, self)._get_max_lengths()
        max_lengths['transition_length'] = self.max_transition_length
        return max_lengths

    @overrides
    def _set_max_lengths(self, max_lengths: Dict[str, int]):
        """Record the padding lengths computed from the training data."""
        super(TreeLSTMModel, self)._set_max_lengths(max_lengths)
        self.max_transition_length = max_lengths['transition_length']

    @overrides
    def _set_max_lengths_from_model(self):
        # Recover the sentence length from the saved model's input shape.
        self.num_sentence_words = self.model.get_input_shape_at(0)[0][1]
        # TODO(matt): set the max transition length.

    @classmethod
    @overrides
    def _get_custom_objects(cls):
        """Register the custom layer so saved models can be deserialised."""
        custom_objects = super(TreeLSTMModel, cls)._get_custom_objects()
        custom_objects['TreeCompositionLSTM'] = TreeCompositionLSTM
        return custom_objects
| 44.505376 | 106 | 0.694129 | from typing import Any, Dict
from overrides import overrides
from keras.layers import Input, Dense, Dropout, merge
from keras.regularizers import l2
from ...data.instances.logical_form_instance import LogicalFormInstance
from ..layers.tree_composition_lstm import TreeCompositionLSTM
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class TreeLSTMModel(TextTrainer):
def __init__(self, params: Dict[str, Any]):
super(TreeLSTMModel, self).__init__(params)
self.max_transition_length = None
def _instance_type(self):
return LogicalFormInstance
@overrides
def _build_model(self):
# that to initialize the stack and buffer in the LSTM.
buffer_ops_limit = self.num_sentence_words
stack_limit = buffer_ops_limit
# The transitions input has an extra trailing dimension to make the concatenation with the
# buffer embedding easier.
transitions_input = Input(shape=(buffer_ops_limit, 1))
# Step 2: Convert the logical form input to word vectors.
logical_form_input = Input(shape=self._get_sentence_shape(), dtype='int32', name="sentence_input")
logical_form_embeddings = self._embed_input(logical_form_input)
# Step 3: Merge transitions and buffer.
lstm_input = merge([transitions_input, logical_form_embeddings], mode='concat')
# Step 4: Pass the sequences of word vectors through TreeLSTM.
lstm_layer = TreeCompositionLSTM(stack_limit=stack_limit, buffer_ops_limit=buffer_ops_limit,
units=self.embedding_dim['words'],
W_regularizer=l2(0.01), U_regularizer=l2(0.01),
V_regularizer=l2(0.01), b_regularizer=l2(0.01),
name='treelstm')
lstm_out = lstm_layer(lstm_input)
regularized_lstm_out = Dropout(0.2)(lstm_out)
# Step 5: Find p(true | proposition) by passing the encoded proposition through MLP with
# ReLU followed by softmax.
projection_layer = Dense(self.embedding_dim['words']/2, activation='relu', name='projector')
softmax_layer = Dense(2, activation='softmax', name='softmax')
output_probabilities = softmax_layer(projection_layer(regularized_lstm_out))
# Step 6: Define crossentropy against labels as the loss and compile the model.
return DeepQaModel(input=[transitions_input, logical_form_input], output=output_probabilities)
@overrides
def _get_max_lengths(self) -> Dict[str, int]:
max_lengths = super(TreeLSTMModel, self)._get_max_lengths()
max_lengths['transition_length'] = self.max_transition_length
return max_lengths
@overrides
def _set_max_lengths(self, max_lengths: Dict[str, int]):
super(TreeLSTMModel, self)._set_max_lengths(max_lengths)
self.max_transition_length = max_lengths['transition_length']
@overrides
def _set_max_lengths_from_model(self):
self.num_sentence_words = self.model.get_input_shape_at(0)[0][1]
# TODO(matt): set the max transition length.
@classmethod
@overrides
def _get_custom_objects(cls):
custom_objects = super(TreeLSTMModel, cls)._get_custom_objects()
custom_objects['TreeCompositionLSTM'] = TreeCompositionLSTM
return custom_objects
| true | true |
f73306ecc4e756db762dfaa3937f0a707690f701 | 16,072 | py | Python | tests/test_event_auth.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | tests/test_event_auth.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | tests/test_event_auth.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Optional
from synapse import event_auth
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.types import JsonDict, get_domain_from_id
class EventAuthTestCase(unittest.TestCase):
    """Tests for the event-authorisation rules in ``synapse.event_auth``."""

    def test_random_users_cannot_send_state_before_first_pl(self):
        """
        Check that, before the first PL lands, the creator is the only user
        that can send a state event.
        """
        creator = "@creator:example.com"
        joiner = "@joiner:example.com"
        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.member", joiner): _join_event(joiner),
        }

        # creator should be able to send state
        event_auth.check(
            RoomVersions.V1,
            _random_state_event(creator),
            auth_events,
            do_sig_check=False,
        )

        # joiner should not be able to send state
        self.assertRaises(
            AuthError,
            event_auth.check,
            RoomVersions.V1,
            _random_state_event(joiner),
            auth_events,
            do_sig_check=False,
        )

    def test_state_default_level(self):
        """
        Check that users above the state_default level can send state and
        those below cannot
        """
        creator = "@creator:example.com"
        pleb = "@joiner:example.com"
        king = "@joiner2:example.com"

        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.power_levels", ""): _power_levels_event(
                creator, {"state_default": "30", "users": {pleb: "29", king: "30"}}
            ),
            ("m.room.member", pleb): _join_event(pleb),
            ("m.room.member", king): _join_event(king),
        }

        # pleb should not be able to send state
        # (previously this statement had a stray trailing comma, making it a
        # throwaway one-element tuple)
        self.assertRaises(
            AuthError,
            event_auth.check,
            RoomVersions.V1,
            _random_state_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # king should be able to send state
        event_auth.check(
            RoomVersions.V1,
            _random_state_event(king),
            auth_events,
            do_sig_check=False,
        )

    def test_alias_event(self):
        """Alias events have special behavior up through room version 6."""
        creator = "@creator:example.com"
        other = "@other:example.com"
        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
        }

        # creator should be able to send aliases
        event_auth.check(
            RoomVersions.V1,
            _alias_event(creator),
            auth_events,
            do_sig_check=False,
        )

        # Reject an event with no state key.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V1,
                _alias_event(creator, state_key=""),
                auth_events,
                do_sig_check=False,
            )

        # If the domain of the sender does not match the state key, reject.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V1,
                _alias_event(creator, state_key="test.com"),
                auth_events,
                do_sig_check=False,
            )

        # Note that the member does *not* need to be in the room.
        event_auth.check(
            RoomVersions.V1,
            _alias_event(other),
            auth_events,
            do_sig_check=False,
        )

    def test_msc2432_alias_event(self):
        """After MSC2432, alias events have no special behavior."""
        creator = "@creator:example.com"
        other = "@other:example.com"
        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
        }

        # creator should be able to send aliases
        event_auth.check(
            RoomVersions.V6,
            _alias_event(creator),
            auth_events,
            do_sig_check=False,
        )

        # No particular checks are done on the state key.
        event_auth.check(
            RoomVersions.V6,
            _alias_event(creator, state_key=""),
            auth_events,
            do_sig_check=False,
        )
        event_auth.check(
            RoomVersions.V6,
            _alias_event(creator, state_key="test.com"),
            auth_events,
            do_sig_check=False,
        )

        # Per standard auth rules, the member must be in the room.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _alias_event(other),
                auth_events,
                do_sig_check=False,
            )

    def test_msc2209(self):
        """
        Notifications power levels get checked due to MSC2209.
        """
        creator = "@creator:example.com"
        pleb = "@joiner:example.com"

        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.power_levels", ""): _power_levels_event(
                creator, {"state_default": "30", "users": {pleb: "30"}}
            ),
            ("m.room.member", pleb): _join_event(pleb),
        }

        # pleb should be able to modify the notifications power level.
        event_auth.check(
            RoomVersions.V1,
            _power_levels_event(pleb, {"notifications": {"room": 100}}),
            auth_events,
            do_sig_check=False,
        )

        # But an MSC2209 room rejects this change.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _power_levels_event(pleb, {"notifications": {"room": 100}}),
                auth_events,
                do_sig_check=False,
            )

    def test_join_rules_public(self):
        """
        Test joining a public room.
        """
        creator = "@creator:example.com"
        pleb = "@joiner:example.com"

        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.join_rules", ""): _join_rules_event(creator, "public"),
        }

        # Check join.
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user cannot be force-joined to a room.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _member_event(pleb, "join", sender=creator),
                auth_events,
                do_sig_check=False,
            )

        # Banned should be rejected.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # A user who left can re-join.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user can send a join if they're in the room.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user can accept an invite.
        auth_events[("m.room.member", pleb)] = _member_event(
            pleb, "invite", sender=creator
        )
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

    def test_join_rules_invite(self):
        """
        Test joining an invite only room.
        """
        creator = "@creator:example.com"
        pleb = "@joiner:example.com"

        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.join_rules", ""): _join_rules_event(creator, "invite"),
        }

        # A join without an invite is rejected.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # A user cannot be force-joined to a room.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _member_event(pleb, "join", sender=creator),
                auth_events,
                do_sig_check=False,
            )

        # Banned should be rejected.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # A user who left cannot re-join.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # A user can send a join if they're in the room.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user can accept an invite.
        auth_events[("m.room.member", pleb)] = _member_event(
            pleb, "invite", sender=creator
        )
        event_auth.check(
            RoomVersions.V6,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

    def test_join_rules_msc3083_restricted(self):
        """
        Test joining a restricted room from MSC3083.

        This is pretty much the same test as public.
        """
        creator = "@creator:example.com"
        pleb = "@joiner:example.com"

        auth_events = {
            ("m.room.create", ""): _create_event(creator),
            ("m.room.member", creator): _join_event(creator),
            ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"),
        }

        # Older room versions don't understand this join rule
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.V6,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # Check join.
        event_auth.check(
            RoomVersions.MSC3083,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user cannot be force-joined to a room.
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.MSC3083,
                _member_event(pleb, "join", sender=creator),
                auth_events,
                do_sig_check=False,
            )

        # Banned should be rejected.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
        with self.assertRaises(AuthError):
            event_auth.check(
                RoomVersions.MSC3083,
                _join_event(pleb),
                auth_events,
                do_sig_check=False,
            )

        # A user who left can re-join.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
        event_auth.check(
            RoomVersions.MSC3083,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user can send a join if they're in the room.
        auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
        event_auth.check(
            RoomVersions.MSC3083,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )

        # A user can accept an invite.
        auth_events[("m.room.member", pleb)] = _member_event(
            pleb, "invite", sender=creator
        )
        event_auth.check(
            RoomVersions.MSC3083,
            _join_event(pleb),
            auth_events,
            do_sig_check=False,
        )
# helpers for making events

TEST_ROOM_ID = "!test:room"  # room id shared by every synthetic event below


def _create_event(user_id: str) -> EventBase:
    """Return a minimal m.room.create event with *user_id* as the creator."""
    return make_event_from_dict(
        {
            "room_id": TEST_ROOM_ID,
            "event_id": _get_event_id(),
            "type": "m.room.create",
            "sender": user_id,
            "content": {"creator": user_id},
        }
    )


def _member_event(
    user_id: str, membership: str, sender: Optional[str] = None
) -> EventBase:
    """Return an m.room.member event giving *user_id* the given membership.

    *sender* defaults to *user_id*, i.e. a self-initiated membership change.
    """
    return make_event_from_dict(
        {
            "room_id": TEST_ROOM_ID,
            "event_id": _get_event_id(),
            "type": "m.room.member",
            "sender": sender or user_id,
            "state_key": user_id,
            "content": {"membership": membership},
            "prev_events": [],
        }
    )


def _join_event(user_id: str) -> EventBase:
    """Shorthand for a self-sent "join" membership event."""
    return _member_event(user_id, "join")


def _power_levels_event(sender: str, content: JsonDict) -> EventBase:
    """Return an m.room.power_levels event with the given content."""
    return make_event_from_dict(
        {
            "room_id": TEST_ROOM_ID,
            "event_id": _get_event_id(),
            "type": "m.room.power_levels",
            "sender": sender,
            "state_key": "",
            "content": content,
        }
    )


def _alias_event(sender: str, **kwargs) -> EventBase:
    """Return an m.room.aliases event; keyword args override any field.

    The state key defaults to the sender's server name.
    """
    data = {
        "room_id": TEST_ROOM_ID,
        "event_id": _get_event_id(),
        "type": "m.room.aliases",
        "sender": sender,
        "state_key": get_domain_from_id(sender),
        "content": {"aliases": []},
    }
    data.update(**kwargs)
    return make_event_from_dict(data)


def _random_state_event(sender: str) -> EventBase:
    """Return a state event of an arbitrary, non-special type."""
    return make_event_from_dict(
        {
            "room_id": TEST_ROOM_ID,
            "event_id": _get_event_id(),
            "type": "test.state",
            "sender": sender,
            "state_key": "",
            "content": {"membership": "join"},
        }
    )


def _join_rules_event(sender: str, join_rule: str) -> EventBase:
    """Return an m.room.join_rules event with the given join rule."""
    return make_event_from_dict(
        {
            "room_id": TEST_ROOM_ID,
            "event_id": _get_event_id(),
            "type": "m.room.join_rules",
            "sender": sender,
            "state_key": "",
            "content": {
                "join_rule": join_rule,
            },
        }
    )
event_count = 0
def _get_event_id() -> str:
global event_count
c = event_count
event_count += 1
return "!%i:example.com" % (c,)
| 30.324528 | 83 | 0.542123 |
import unittest
from typing import Optional
from synapse import event_auth
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.types import JsonDict, get_domain_from_id
class EventAuthTestCase(unittest.TestCase):
def test_random_users_cannot_send_state_before_first_pl(self):
creator = "@creator:example.com"
joiner = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.member", joiner): _join_event(joiner),
}
event_auth.check(
RoomVersions.V1,
_random_state_event(creator),
auth_events,
do_sig_check=False,
)
self.assertRaises(
AuthError,
event_auth.check,
RoomVersions.V1,
_random_state_event(joiner),
auth_events,
do_sig_check=False,
)
def test_state_default_level(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
king = "@joiner2:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.power_levels", ""): _power_levels_event(
creator, {"state_default": "30", "users": {pleb: "29", king: "30"}}
),
("m.room.member", pleb): _join_event(pleb),
("m.room.member", king): _join_event(king),
}
self.assertRaises(
AuthError,
event_auth.check,
RoomVersions.V1,
_random_state_event(pleb),
auth_events,
do_sig_check=False,
),
event_auth.check(
RoomVersions.V1,
_random_state_event(king),
auth_events,
do_sig_check=False,
)
def test_alias_event(self):
creator = "@creator:example.com"
other = "@other:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
}
event_auth.check(
RoomVersions.V1,
_alias_event(creator),
auth_events,
do_sig_check=False,
)
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V1,
_alias_event(creator, state_key=""),
auth_events,
do_sig_check=False,
)
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V1,
_alias_event(creator, state_key="test.com"),
auth_events,
do_sig_check=False,
)
event_auth.check(
RoomVersions.V1,
_alias_event(other),
auth_events,
do_sig_check=False,
)
def test_msc2432_alias_event(self):
creator = "@creator:example.com"
other = "@other:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
}
event_auth.check(
RoomVersions.V6,
_alias_event(creator),
auth_events,
do_sig_check=False,
)
event_auth.check(
RoomVersions.V6,
_alias_event(creator, state_key=""),
auth_events,
do_sig_check=False,
)
event_auth.check(
RoomVersions.V6,
_alias_event(creator, state_key="test.com"),
auth_events,
do_sig_check=False,
)
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_alias_event(other),
auth_events,
do_sig_check=False,
)
def test_msc2209(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.power_levels", ""): _power_levels_event(
creator, {"state_default": "30", "users": {pleb: "30"}}
),
("m.room.member", pleb): _join_event(pleb),
}
event_auth.check(
RoomVersions.V1,
_power_levels_event(pleb, {"notifications": {"room": 100}}),
auth_events,
do_sig_check=False,
)
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_power_levels_event(pleb, {"notifications": {"room": 100}}),
auth_events,
do_sig_check=False,
)
def test_join_rules_public(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "public"),
}
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
def test_join_rules_invite(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "invite"),
}
# A join without an invite is rejected.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user who left cannot re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
def test_join_rules_msc3083_restricted(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "restricted"),
}
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# Check join.
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.MSC3083,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)
# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user who left can re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)
TEST_ROOM_ID = "!test:room"
def _create_event(user_id: str) -> EventBase:
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.create",
"sender": user_id,
"content": {"creator": user_id},
}
)
def _member_event(
user_id: str, membership: str, sender: Optional[str] = None
) -> EventBase:
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.member",
"sender": sender or user_id,
"state_key": user_id,
"content": {"membership": membership},
"prev_events": [],
}
)
def _join_event(user_id: str) -> EventBase:
return _member_event(user_id, "join")
def _power_levels_event(sender: str, content: JsonDict) -> EventBase:
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.power_levels",
"sender": sender,
"state_key": "",
"content": content,
}
)
def _alias_event(sender: str, **kwargs) -> EventBase:
data = {
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.aliases",
"sender": sender,
"state_key": get_domain_from_id(sender),
"content": {"aliases": []},
}
data.update(**kwargs)
return make_event_from_dict(data)
def _random_state_event(sender: str) -> EventBase:
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "test.state",
"sender": sender,
"state_key": "",
"content": {"membership": "join"},
}
)
def _join_rules_event(sender: str, join_rule: str) -> EventBase:
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.join_rules",
"sender": sender,
"state_key": "",
"content": {
"join_rule": join_rule,
},
}
)
event_count = 0
def _get_event_id() -> str:
global event_count
c = event_count
event_count += 1
return "!%i:example.com" % (c,)
| true | true |
f73307d09ce2412b5111e7dbe06013b05e0f061f | 1,930 | py | Python | homeassistant/components/nam/button.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/nam/button.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/nam/button.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support for the Nettigo Air Monitor service."""
from __future__ import annotations
import logging
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import NAMDataUpdateCoordinator
from .const import DEFAULT_NAME, DOMAIN
# Serialise entity updates for this platform.
PARALLEL_UPDATES = 1

_LOGGER = logging.getLogger(__name__)

# The single button this platform exposes: a device restart, shown in the
# device's configuration section.
RESTART_BUTTON: ButtonEntityDescription = ButtonEntityDescription(
    key="restart",
    name=f"{DEFAULT_NAME} Restart",
    device_class=ButtonDeviceClass.RESTART,
    entity_category=EntityCategory.CONFIG,
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Add Nettigo Air Monitor button entities from a config entry."""
    coordinator: NAMDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    # Only the restart button is exposed by this platform.
    entities = [NAMButton(coordinator, RESTART_BUTTON)]
    async_add_entities(entities, False)
class NAMButton(CoordinatorEntity, ButtonEntity):
    """Define a Nettigo Air Monitor button."""

    coordinator: NAMDataUpdateCoordinator

    def __init__(
        self,
        coordinator: NAMDataUpdateCoordinator,
        description: ButtonEntityDescription,
    ) -> None:
        """Initialize the button from the coordinator and its description."""
        super().__init__(coordinator)
        # Ties this entity to the monitor's device-registry entry.
        self._attr_device_info = coordinator.device_info
        # Unique per device and button kind, e.g. "<device-id>-restart".
        self._attr_unique_id = f"{coordinator.unique_id}-{description.key}"
        self.entity_description = description

    async def async_press(self) -> None:
        """Trigger a restart of the device when the button is pressed."""
        await self.coordinator.nam.async_restart()
| 30.634921 | 84 | 0.759067 | from __future__ import annotations
import logging
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import NAMDataUpdateCoordinator
from .const import DEFAULT_NAME, DOMAIN
PARALLEL_UPDATES = 1
_LOGGER = logging.getLogger(__name__)
RESTART_BUTTON: ButtonEntityDescription = ButtonEntityDescription(
key="restart",
name=f"{DEFAULT_NAME} Restart",
device_class=ButtonDeviceClass.RESTART,
entity_category=EntityCategory.CONFIG,
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
coordinator: NAMDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
buttons: list[NAMButton] = []
buttons.append(NAMButton(coordinator, RESTART_BUTTON))
async_add_entities(buttons, False)
class NAMButton(CoordinatorEntity, ButtonEntity):
coordinator: NAMDataUpdateCoordinator
def __init__(
self,
coordinator: NAMDataUpdateCoordinator,
description: ButtonEntityDescription,
) -> None:
super().__init__(coordinator)
self._attr_device_info = coordinator.device_info
self._attr_unique_id = f"{coordinator.unique_id}-{description.key}"
self.entity_description = description
async def async_press(self) -> None:
await self.coordinator.nam.async_restart()
| true | true |
f73309561fba95db4ba2a0a348346c540dbea713 | 392 | py | Python | pacientes/migrations/0002_auto_20191003_1201.py | williamlemos253/health | 9c4ffebfdf79a6e622225f6b5d059e991c903bc2 | [
"Unlicense"
] | null | null | null | pacientes/migrations/0002_auto_20191003_1201.py | williamlemos253/health | 9c4ffebfdf79a6e622225f6b5d059e991c903bc2 | [
"Unlicense"
] | null | null | null | pacientes/migrations/0002_auto_20191003_1201.py | williamlemos253/health | 9c4ffebfdf79a6e622225f6b5d059e991c903bc2 | [
"Unlicense"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-03 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Profile.empresa so the field may be
    # left blank in forms while keeping the 150-character limit.

    dependencies = [
        ('pacientes', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='empresa',
            # blank=True permits an empty value at the form/validation level.
            field=models.CharField(blank=True, max_length=150),
        ),
    ]
| 20.631579 | 63 | 0.596939 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pacientes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='empresa',
field=models.CharField(blank=True, max_length=150),
),
]
| true | true |
f7330a456835a6e3ed0b79a4dfe6154732fa71ea | 3,851 | py | Python | parsers/GB_ORK.py | phanthe1/electricitymap-contrib | 26c071107fca86c2548b9558891c4a38b8ac7781 | [
"MIT"
] | 1,582 | 2018-07-16T10:52:36.000Z | 2021-12-06T06:03:32.000Z | parsers/GB_ORK.py | phanthe1/electricitymap-contrib | 26c071107fca86c2548b9558891c4a38b8ac7781 | [
"MIT"
] | 1,463 | 2018-07-09T12:23:35.000Z | 2021-12-06T08:11:37.000Z | parsers/GB_ORK.py | phanthe1/electricitymap-contrib | 26c071107fca86c2548b9558891c4a38b8ac7781 | [
"MIT"
] | 650 | 2018-07-10T02:07:17.000Z | 2021-12-03T11:05:45.000Z | #!/usr/bin/env python3
"""Parser for the Orkney Islands"""
import arrow
import dateutil
import logging
import requests
from bs4 import BeautifulSoup
# There is a 2MW storage battery on the islands.
# http://www.oref.co.uk/orkneys-energy/innovations-2/
TZ = 'Europe/London'
DATETIME_LINK = 'https://www.ssen.co.uk/anm/orkney/'
GENERATION_LINK = 'https://www.ssen.co.uk/Sse_Components/Views/Controls/FormControls/Handlers/ActiveNetworkManagementHandler.ashx?action=graph&contentId=14973&_=1537467858726'
GENERATION_MAPPING = {"Live Demand": "Demand",
"Orkney ANM": "ANM Renewables",
"Non-ANM Renewable Generation": "Renewables"}
def get_json_data(session):
    """Fetch the ANM generation graph JSON and return a {label: MW} mapping.

    Each dataset's value is the maximum of its data series, and only labels
    present in GENERATION_MAPPING are kept.
    """
    http = session or requests.Session()
    # NOTE(review): certificate verification is disabled upstream; kept as-is.
    response = http.get(GENERATION_LINK, verify=False)
    datasets = response.json()['data']['datasets']
    production = {
        series['label']: float(max(series['data']))
        for series in datasets
    }
    # Discard any series we do not know how to map.
    return {
        label: value
        for label, value in production.items()
        if label in GENERATION_MAPPING
    }
def get_datetime(session):
    """Scrape the data timestamp from the ANM page and return it tz-aware.

    Raises ValueError when the reading is more than two hours old so stale
    data is never reported.
    """
    http = session or requests.Session()
    # NOTE(review): certificate verification is disabled upstream; kept as-is.
    page = http.get(DATETIME_LINK, verify=False)
    soup = BeautifulSoup(page.text, 'html.parser')
    widget = soup.find("div", {"class": "Widget-Base Widget-ANMGraph"})
    button_nodes = widget.find("div", {"class": "button"}).contents
    # The last node carries the timestamp; dropping everything before the
    # first space removes a leading label word before parsing.
    raw_timestamp = button_nodes[-1].strip().split(' ', 1)[-1]
    parsed = arrow.get(raw_timestamp, 'DD MMMM YYYY HH:mm:ss')
    aware = parsed.replace(tzinfo=dateutil.tz.gettz(TZ))
    age = arrow.now(TZ) - aware
    if age.total_seconds() > 7200:
        raise ValueError('Orkney data is too old to use, data is {} hours old.'.format(age.total_seconds()/3600))
    return aware.datetime
def fetch_production(zone_key='GB-ORK', session=None, target_datetime=None,
                     logger=logging.getLogger(__name__)) -> dict:
    """Requests the last known production mix (in MW) of a given country."""
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')

    generation = get_json_data(session)
    # Demand is not a production category; remove it before mapping.
    generation.pop("Live Demand")
    # All local generation is reported as 'unknown' in the production mix.
    unknown_mw = (generation.get("Orkney ANM", 0.0)
                  + generation.get("Non-ANM Renewable Generation", 0.0))
    return {
        'zoneKey': zone_key,
        'datetime': get_datetime(session),
        'production': {'unknown': unknown_mw},
        'storage': {
            'battery': None,
        },
        'source': 'ssen.co.uk',
    }
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
    """Requests the last known power exchange (in MW) between two zones."""
    sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
    generation = get_json_data(session)
    dt = get_datetime(session)
    # Positive net flow = importing from the mainland;
    # negative net flow = exporting to the mainland.
    local_supply = generation['Orkney ANM'] + generation['Non-ANM Renewable Generation']
    return {
        'netFlow': generation['Live Demand'] - local_supply,
        'datetime': dt,
        'sortedZoneKeys': sorted_zone_keys,
        'source': 'ssen.co.uk',
    }
# Manual smoke test: hits the live SSEN site, so only run interactively.
if __name__ == '__main__':
    print('fetch_production() ->')
    print(fetch_production())
    print('fetch_exchange(GB, GB-ORK)')
    print(fetch_exchange('GB', 'GB-ORK'))
| 31.056452 | 175 | 0.660608 |
import arrow
import dateutil
import logging
import requests
from bs4 import BeautifulSoup
TZ = 'Europe/London'
DATETIME_LINK = 'https://www.ssen.co.uk/anm/orkney/'
GENERATION_LINK = 'https://www.ssen.co.uk/Sse_Components/Views/Controls/FormControls/Handlers/ActiveNetworkManagementHandler.ashx?action=graph&contentId=14973&_=1537467858726'
GENERATION_MAPPING = {"Live Demand": "Demand",
"Orkney ANM": "ANM Renewables",
"Non-ANM Renewable Generation": "Renewables"}
def get_json_data(session):
s = session or requests.Session()
req = s.get(GENERATION_LINK, verify=False)
raw_json_data = req.json()
generation_data = raw_json_data['data']['datasets']
production = {}
for datapoint in generation_data:
gen_type = datapoint['label']
val = float(max(datapoint['data']))
production[gen_type] = val
for k in list(production.keys()):
if k not in GENERATION_MAPPING.keys():
production.pop(k)
return production
def get_datetime(session):
s = session or requests.Session()
req = s.get(DATETIME_LINK, verify=False)
soup = BeautifulSoup(req.text, 'html.parser')
data_table = soup.find("div", {"class": "Widget-Base Widget-ANMGraph"})
last_updated = data_table.find("div", {"class": "button"}).contents
raw_dt = last_updated[-1].strip().split(' ', 1)[-1]
naive_dt = arrow.get(raw_dt, 'DD MMMM YYYY HH:mm:ss')
aware_dt = naive_dt.replace(tzinfo=dateutil.tz.gettz(TZ))
current_time = arrow.now(TZ)
diff = current_time - aware_dt
if diff.total_seconds() > 7200:
raise ValueError('Orkney data is too old to use, data is {} hours old.'.format(diff.total_seconds()/3600))
return aware_dt.datetime
def fetch_production(zone_key='GB-ORK', session=None, target_datetime=None,
logger=logging.getLogger(__name__)) -> dict:
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
raw_data = get_json_data(session)
raw_data.pop("Live Demand")
mapped_data = {}
mapped_data['unknown'] = raw_data.get("Orkney ANM", 0.0) + raw_data.get("Non-ANM Renewable Generation", 0.0)
dt = get_datetime(session)
data = {
'zoneKey': zone_key,
'datetime': dt,
'production': mapped_data,
'storage': {
'battery': None,
},
'source': 'ssen.co.uk'
}
return data
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
raw_data = get_json_data(session)
dt = get_datetime(session)
total_generation = raw_data['Orkney ANM'] + raw_data['Non-ANM Renewable Generation']
netflow = raw_data['Live Demand'] - total_generation
data = {'netFlow': netflow,
'datetime': dt,
'sortedZoneKeys': sorted_zone_keys,
'source': 'ssen.co.uk'}
return data
if __name__ == '__main__':
print('fetch_production() ->')
print(fetch_production())
print('fetch_exchange(GB, GB-ORK)')
print(fetch_exchange('GB', 'GB-ORK'))
| true | true |
f7330a62c3ae0fab87713a5d1f5c2187bc314f45 | 2,750 | py | Python | tests/endtoend/TestRealRandomGroupXData.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 3 | 2018-07-02T03:50:23.000Z | 2019-05-16T03:23:55.000Z | tests/endtoend/TestRealRandomGroupXData.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T01:33:06.000Z | 2021-01-07T01:33:06.000Z | tests/endtoend/TestRealRandomGroupXData.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 1 | 2020-09-01T13:21:18.000Z | 2020-09-01T13:21:18.000Z | import numpy as np
import unittest
from collections import OrderedDict
import bnpy
from AbstractEndToEndTest import AbstractEndToEndTest
class TestEndToEnd(AbstractEndToEndTest):
    """End-to-end test over GroupXData with mixture/topic allocation models."""
    __test__ = True
    def setUp(self):
        """Create a random grouped dataset and the model/algorithm grids
        that the abstract base test iterates over.
        """
        rng = np.random.RandomState(0)
        X = rng.rand(100, 2)
        # Four documents of sizes 20, 20, 10, 50.
        doc_range = [0, 20, 40, 50, 100]
        self.Data = bnpy.data.GroupXData(X=X, doc_range=doc_range)
        self.possibleAllocModelNames = ["FiniteMixtureModel",
                                        "FiniteTopicModel",
                                        "HDPTopicModel",
                                        ]
        self.possibleObsModelNames = ["Gauss",
                                      "DiagGauss",
                                      "ZeroMeanGauss",
                                      ]
        self.possibleInitNames = ["randexamples",
                                  "randexamplesbydist",
                                  ]
        # Which learning algorithms are valid for each allocation model.
        self.possibleLearnAlgsForAllocModel = dict(
            FiniteMixtureModel=["EM", "VB", "soVB", "moVB"],
            FiniteTopicModel=["VB", "soVB", "moVB"],
            HDPTopicModel=["VB", "soVB", "moVB"],
        )
    def nextAllocKwArgsForVB(self):
        """Yield allocation-model kwarg dicts for each hyperparameter setting.

        NOTE(review): the same OrderedDict object is mutated and re-yielded
        within each branch, so consumers must use each yielded dict before
        advancing the generator. Also, the HDPTopicModel branch loops over
        alpha but never stores it in kwargs -- presumably intentional, but
        worth confirming.
        """
        for aName in self.possibleAllocModelNames:
            kwargs = OrderedDict()
            kwargs['name'] = aName
            if aName == 'FiniteMixtureModel':
                for gamma in [0.1, 1.0, 9.9]:
                    kwargs['gamma'] = gamma
                    yield kwargs
            elif aName == 'DPMixtureModel':
                # Unreachable with the current possibleAllocModelNames list.
                for gamma0 in [1.0, 9.9]:
                    kwargs['gamma0'] = gamma0
                    yield kwargs
            elif aName == 'FiniteTopicModel':
                for alpha in [0.1, 0.5, 22]:
                    kwargs['alpha'] = alpha
                    yield kwargs
            elif aName == 'HDPTopicModel':
                for alpha in [0.1, 0.5]:
                    for gamma in [1.0, 5.0]:
                        kwargs['gamma'] = gamma
                        yield kwargs
    def nextObsKwArgsForVB(self, aName):
        """Yield observation-model kwargs over name x sF x ECovMat grid."""
        for oName in self.possibleObsModelNames:
            for sF in [0.5, 1.0, 5.0]:
                for ECovMat in ['eye', 'covdata']:
                    kwargs = OrderedDict()
                    kwargs['name'] = oName
                    kwargs['ECovMat'] = ECovMat
                    kwargs['sF'] = sF
                    yield kwargs
    def nextInitKwArgs(self, aName, oName):
        """Yield initialization kwargs over initname x K grid."""
        for iName in self.possibleInitNames:
            for K in [5, 10]:
                kwargs = OrderedDict()
                kwargs['initname'] = iName
                kwargs['K'] = K
                yield kwargs
| 35.714286 | 66 | 0.460727 | import numpy as np
import unittest
from collections import OrderedDict
import bnpy
from AbstractEndToEndTest import AbstractEndToEndTest
class TestEndToEnd(AbstractEndToEndTest):
__test__ = True
def setUp(self):
rng = np.random.RandomState(0)
X = rng.rand(100, 2)
doc_range = [0, 20, 40, 50, 100]
self.Data = bnpy.data.GroupXData(X=X, doc_range=doc_range)
self.possibleAllocModelNames = ["FiniteMixtureModel",
"FiniteTopicModel",
"HDPTopicModel",
]
self.possibleObsModelNames = ["Gauss",
"DiagGauss",
"ZeroMeanGauss",
]
self.possibleInitNames = ["randexamples",
"randexamplesbydist",
]
self.possibleLearnAlgsForAllocModel = dict(
FiniteMixtureModel=["EM", "VB", "soVB", "moVB"],
FiniteTopicModel=["VB", "soVB", "moVB"],
HDPTopicModel=["VB", "soVB", "moVB"],
)
def nextAllocKwArgsForVB(self):
for aName in self.possibleAllocModelNames:
kwargs = OrderedDict()
kwargs['name'] = aName
if aName == 'FiniteMixtureModel':
for gamma in [0.1, 1.0, 9.9]:
kwargs['gamma'] = gamma
yield kwargs
elif aName == 'DPMixtureModel':
for gamma0 in [1.0, 9.9]:
kwargs['gamma0'] = gamma0
yield kwargs
elif aName == 'FiniteTopicModel':
for alpha in [0.1, 0.5, 22]:
kwargs['alpha'] = alpha
yield kwargs
elif aName == 'HDPTopicModel':
for alpha in [0.1, 0.5]:
for gamma in [1.0, 5.0]:
kwargs['gamma'] = gamma
yield kwargs
def nextObsKwArgsForVB(self, aName):
for oName in self.possibleObsModelNames:
for sF in [0.5, 1.0, 5.0]:
for ECovMat in ['eye', 'covdata']:
kwargs = OrderedDict()
kwargs['name'] = oName
kwargs['ECovMat'] = ECovMat
kwargs['sF'] = sF
yield kwargs
def nextInitKwArgs(self, aName, oName):
for iName in self.possibleInitNames:
for K in [5, 10]:
kwargs = OrderedDict()
kwargs['initname'] = iName
kwargs['K'] = K
yield kwargs
| true | true |
f7330c64bab4aab0a950535c1736cf21247eb440 | 776 | py | Python | projects/project_2_commerce/auctions/migrations/0013_auto_20210426_1629.py | kevinbeirne1/CS50-course | b6247cdea50890369a5e0fd60017980643bf6e37 | [
"MIT"
] | 1 | 2021-08-31T20:52:41.000Z | 2021-08-31T20:52:41.000Z | projects/project_2_commerce/auctions/migrations/0013_auto_20210426_1629.py | kevinbeirne1/CS50-course | b6247cdea50890369a5e0fd60017980643bf6e37 | [
"MIT"
] | null | null | null | projects/project_2_commerce/auctions/migrations/0013_auto_20210426_1629.py | kevinbeirne1/CS50-course | b6247cdea50890369a5e0fd60017980643bf6e37 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-04-26 20:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auctions', '0012_auto_20210426_1628'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='comment_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='comment date'),
),
migrations.AlterField(
model_name='listing',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2021, 4, 26, 20, 29, 27, 350009, tzinfo=utc), verbose_name='listing date'),
),
]
| 28.740741 | 140 | 0.645619 |
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auctions', '0012_auto_20210426_1628'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='comment_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='comment date'),
),
migrations.AlterField(
model_name='listing',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2021, 4, 26, 20, 29, 27, 350009, tzinfo=utc), verbose_name='listing date'),
),
]
| true | true |
f7330c6694875625fe7eec1a0346b2660db5233d | 3,759 | py | Python | tests/test_neural_predicate.py | Joshua-Schroijen/deepproblog | 4ae56f1e860010b7857b29d5bd76fb1555d5e19d | [
"Apache-2.0"
] | 54 | 2021-06-23T08:03:23.000Z | 2022-03-10T01:02:43.000Z | tests/test_neural_predicate.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 2 | 2021-06-30T23:48:25.000Z | 2022-03-18T10:45:05.000Z | tests/test_neural_predicate.py | Damzwan/deepproblog | 56bcf5208e79c17510b5d288068fabc6cd64f3cf | [
"Apache-2.0"
] | 12 | 2021-06-30T10:47:52.000Z | 2022-03-09T23:51:48.000Z | import pytest
from deepproblog.utils.standard_networks import DummyNet
from problog.logic import Term, Var
from deepproblog.engines import ExactEngine, ApproximateEngine
from deepproblog.heuristics import geometric_mean
from deepproblog.model import Model
from deepproblog.query import Query
from deepproblog.network import Network
import numpy as np
program = """
nn(dummy1,[X],Y,[a,b,c]) :: net1(X,Y).
nn(dummy2,[X]) :: net2(X).
nn(dummy3,[X],Y) :: net3(X,Y).
test1(X1,Y1,X2,Y2) :- net1(X1,Y1), net1(X2,Y2).
test2(X1,X2) :- net2(X1), net2(X2).
test3(X,Y) :- net3(X,Y).
"""
dummy_values1 = {Term("i1"): [0.8, 0.15, 0.05], Term("i2"): [0.2, 0.3, 0.5]}
dummy_net1 = Network(DummyNet(dummy_values1), "dummy1")
dummy_values2 = {Term("i1"): [0.8], Term("i2"): [0.4]}
dummy_net2 = Network(DummyNet(dummy_values2), "dummy2")
dummy_values3 = {Term("i1"): [1.0, 2.0, 3.0, 4.0], Term("i2"): [-1.0, 0.0, 1.0]}
dummy_net3 = Network(DummyNet(dummy_values3), "dummy3")
@pytest.fixture(
    params=[
        # Approximate engine: keep top-10 proofs, geometric-mean heuristic,
        # no query cache.
        {
            "engine_factory": lambda model: ApproximateEngine(
                model, 10, geometric_mean
            ),
            "cache": False,
        },
        # Exact engine, exercised both without and with the query cache.
        {"engine_factory": lambda model: ExactEngine(model), "cache": False},
        {"engine_factory": lambda model: ExactEngine(model), "cache": True},
    ]
)
def model(request) -> Model:
    """Parametrized fixture: the same program/networks under each engine
    configuration, so every test runs against all three setups.
    """
    model = Model(program, [dummy_net1, dummy_net2, dummy_net3], load=False)
    engine = request.param["engine_factory"](model)
    model.set_engine(engine, cache=request.param["cache"])
    return model
def test_model_basics(model):
    """The fixture must hand back a fully initialized model."""
    # set_engine() in the fixture is expected to populate both of these.
    assert model.program is not None
    assert model.solver is not None
def test_ad_network(model: Model):
    """Annotated-disjunction network: independent net1 calls multiply,
    while contradictory outputs for the same input have probability 0.
    """
    cases = [
        # (query, expected probability)
        (Query(Term("test1", Term("i1"), Term("a"), Term("i2"), Term("b"))), 0.24),
        (Query(Term("test1", Term("i1"), Term("a"), Term("i2"), Term("a"))), 0.16),
        (Query(Term("test1", Term("i1"), Term("a"), Term("i1"), Term("b"))), 0),
    ]
    outcomes = model.solve([query for query, _ in cases])
    for outcome, (query, expected) in zip(outcomes, cases):
        assert pytest.approx(expected) == float(outcome.result[query.query])
def test_fact_network(model: Model):
    """Probabilistic-fact network: distinct facts multiply; the same fact
    used twice is not squared.
    """
    cases = [
        (Query(Term("test2", Term("i1"), Term("i2"))), 0.32),  # 0.8 * 0.4
        (Query(Term("test2", Term("i1"), Term("i1"))), 0.8),   # same fact twice
    ]
    outcomes = model.solve([query for query, _ in cases])
    for outcome, (query, expected) in zip(outcomes, cases):
        assert pytest.approx(expected) == float(outcome.result[query.query])
def test_det_network(model: Model):
    """A deterministic network binds its raw output tensor to the free var."""
    q1 = Query(Term("test3", Term("i1"), Var("X")))
    q2 = Query(Term("test3", Term("i2"), Var("X")))
    results = model.solve([q1, q2])
    # Each result's single answer substitutes X (arg index 1) with a tensor
    # reference term; resolve it through the model's tensor store.
    r1 = list(results[0].result)[0].args[1]
    r2 = list(results[1].result)[0].args[1]
    r1 = model.get_tensor(r1)
    r2 = model.get_tensor(r2)
    # Values must match what the dummy network emits for each input.
    assert all(r1.detach().numpy() == [1.0, 2.0, 3.0, 4.0])
    assert all(r2.detach().numpy() == [-1.0, 0.0, 1.0])
def test_det_network_substitution(model: Model):
    """Like test_det_network, but inputs come via query substitutions."""
    # NOTE(review): the body only runs when the engine's query cache is
    # disabled -- the cache=True fixture variant silently passes here,
    # presumably because substitutions and caching do not mix; confirm.
    if not model.solver.cache.cache:
        q1 = Query(Term("test3", Term("a"), Var("X")), {Term("a"): Term("i1")})
        q2 = Query(Term("test3", Term("a"), Var("X")), {Term("a"): Term("i2")})
        results = model.solve([q1, q2])
        # Resolve the tensor bound to X (arg index 1) in each single answer.
        r1 = list(results[0].result)[0].args[1]
        r2 = list(results[1].result)[0].args[1]
        r1 = model.get_tensor(r1)
        r2 = model.get_tensor(r2)
        assert all(r1.detach().numpy() == [1.0, 2.0, 3.0, 4.0])
        assert all(r2.detach().numpy() == [-1.0, 0.0, 1.0])
| 36.495146 | 87 | 0.611067 | import pytest
from deepproblog.utils.standard_networks import DummyNet
from problog.logic import Term, Var
from deepproblog.engines import ExactEngine, ApproximateEngine
from deepproblog.heuristics import geometric_mean
from deepproblog.model import Model
from deepproblog.query import Query
from deepproblog.network import Network
import numpy as np
program = """
nn(dummy1,[X],Y,[a,b,c]) :: net1(X,Y).
nn(dummy2,[X]) :: net2(X).
nn(dummy3,[X],Y) :: net3(X,Y).
test1(X1,Y1,X2,Y2) :- net1(X1,Y1), net1(X2,Y2).
test2(X1,X2) :- net2(X1), net2(X2).
test3(X,Y) :- net3(X,Y).
"""
dummy_values1 = {Term("i1"): [0.8, 0.15, 0.05], Term("i2"): [0.2, 0.3, 0.5]}
dummy_net1 = Network(DummyNet(dummy_values1), "dummy1")
dummy_values2 = {Term("i1"): [0.8], Term("i2"): [0.4]}
dummy_net2 = Network(DummyNet(dummy_values2), "dummy2")
dummy_values3 = {Term("i1"): [1.0, 2.0, 3.0, 4.0], Term("i2"): [-1.0, 0.0, 1.0]}
dummy_net3 = Network(DummyNet(dummy_values3), "dummy3")
@pytest.fixture(
params=[
{
"engine_factory": lambda model: ApproximateEngine(
model, 10, geometric_mean
),
"cache": False,
},
{"engine_factory": lambda model: ExactEngine(model), "cache": False},
{"engine_factory": lambda model: ExactEngine(model), "cache": True},
]
)
def model(request) -> Model:
model = Model(program, [dummy_net1, dummy_net2, dummy_net3], load=False)
engine = request.param["engine_factory"](model)
model.set_engine(engine, cache=request.param["cache"])
return model
def test_model_basics(model):
assert model.solver is not None
assert model.program is not None
def test_ad_network(model: Model):
q1 = Query(Term("test1", Term("i1"), Term("a"), Term("i2"), Term("b")))
q2 = Query(Term("test1", Term("i1"), Term("a"), Term("i2"), Term("a")))
q3 = Query(Term("test1", Term("i1"), Term("a"), Term("i1"), Term("b")))
results = model.solve([q1, q2, q3])
r1 = float(results[0].result[q1.query])
r2 = float(results[1].result[q2.query])
r3 = float(results[2].result[q3.query])
assert pytest.approx(0.24) == r1
assert pytest.approx(0.16) == r2
assert pytest.approx(0) == r3
def test_fact_network(model: Model):
q1 = Query(Term("test2", Term("i1"), Term("i2")))
q2 = Query(Term("test2", Term("i1"), Term("i1")))
results = model.solve([q1, q2])
r1 = float(results[0].result[q1.query])
r2 = float(results[1].result[q2.query])
assert pytest.approx(0.32) == r1
assert pytest.approx(0.8) == r2
def test_det_network(model: Model):
q1 = Query(Term("test3", Term("i1"), Var("X")))
q2 = Query(Term("test3", Term("i2"), Var("X")))
results = model.solve([q1, q2])
r1 = list(results[0].result)[0].args[1]
r2 = list(results[1].result)[0].args[1]
r1 = model.get_tensor(r1)
r2 = model.get_tensor(r2)
assert all(r1.detach().numpy() == [1.0, 2.0, 3.0, 4.0])
assert all(r2.detach().numpy() == [-1.0, 0.0, 1.0])
def test_det_network_substitution(model: Model):
if not model.solver.cache.cache:
q1 = Query(Term("test3", Term("a"), Var("X")), {Term("a"): Term("i1")})
q2 = Query(Term("test3", Term("a"), Var("X")), {Term("a"): Term("i2")})
results = model.solve([q1, q2])
r1 = list(results[0].result)[0].args[1]
r2 = list(results[1].result)[0].args[1]
r1 = model.get_tensor(r1)
r2 = model.get_tensor(r2)
assert all(r1.detach().numpy() == [1.0, 2.0, 3.0, 4.0])
assert all(r2.detach().numpy() == [-1.0, 0.0, 1.0])
| true | true |
f7330d70b64d5fb5ed24ad6e63793959cb3a079f | 72 | py | Python | File Handling/ReadCharacter.py | UgainJain/LearnPythonByDoing | 4784c334d7f485223a29592ab47c6c017ec67145 | [
"MIT"
] | 5 | 2018-11-06T11:15:35.000Z | 2020-07-29T21:54:28.000Z | File Handling/ReadCharacter.py | UgainJain/LearnPythonByDoing | 4784c334d7f485223a29592ab47c6c017ec67145 | [
"MIT"
] | 1 | 2018-11-13T13:22:11.000Z | 2018-11-13T13:22:11.000Z | File Handling/ReadCharacter.py | UgainJain/LearnPythonByDoing | 4784c334d7f485223a29592ab47c6c017ec67145 | [
"MIT"
] | 11 | 2018-11-06T11:12:21.000Z | 2019-07-12T11:43:05.000Z | f = open("Writelist.txt", "r")
data = f.read(6)
print(data)
f.close() | 18 | 31 | 0.597222 | f = open("Writelist.txt", "r")
data = f.read(6)
print(data)
f.close() | true | true |
f7330e60ec01b0731bf537b748634fcb7065790e | 1,957 | py | Python | alipay/aop/api/domain/AlipayEcoMycarViolationCityPushModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayEcoMycarViolationCityPushModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayEcoMycarViolationCityPushModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoMycarViolationCityPushModel(object):
    """Request model for pushing a city's traffic-violation service status.

    Holds three optional fields (city_code, push_type, service_status) and
    converts to/from the plain-dict form used by the Alipay OpenAPI gateway.
    The per-field serialization boilerplate of the generated SDK is collapsed
    into a single loop over _FIELDS.
    """

    # Serializable attribute names, in payload order.
    _FIELDS = ('city_code', 'push_type', 'service_status')

    def __init__(self):
        self._city_code = None
        self._push_type = None
        self._service_status = None

    @property
    def city_code(self):
        return self._city_code

    @city_code.setter
    def city_code(self, value):
        self._city_code = value

    @property
    def push_type(self):
        return self._push_type

    @push_type.setter
    def push_type(self, value):
        self._push_type = value

    @property
    def service_status(self):
        return self._service_status

    @service_status.setter
    def service_status(self, value):
        self._service_status = value

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict.

        Mirrors the generated-SDK behavior: falsy fields (None, '', 0) are
        omitted, and values exposing to_alipay_dict() are serialized
        recursively.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayEcoMycarViolationCityPushModel()
        for field in AlipayEcoMycarViolationCityPushModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| 27.56338 | 79 | 0.602453 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoMycarViolationCityPushModel(object):
def __init__(self):
self._city_code = None
self._push_type = None
self._service_status = None
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def push_type(self):
return self._push_type
@push_type.setter
def push_type(self, value):
self._push_type = value
@property
def service_status(self):
return self._service_status
@service_status.setter
def service_status(self, value):
self._service_status = value
def to_alipay_dict(self):
params = dict()
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.push_type:
if hasattr(self.push_type, 'to_alipay_dict'):
params['push_type'] = self.push_type.to_alipay_dict()
else:
params['push_type'] = self.push_type
if self.service_status:
if hasattr(self.service_status, 'to_alipay_dict'):
params['service_status'] = self.service_status.to_alipay_dict()
else:
params['service_status'] = self.service_status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoMycarViolationCityPushModel()
if 'city_code' in d:
o.city_code = d['city_code']
if 'push_type' in d:
o.push_type = d['push_type']
if 'service_status' in d:
o.service_status = d['service_status']
return o
| true | true |
f7330ee984c0980ccf5e9c6390ac902891c23fe9 | 1,098 | py | Python | Chapter16/bypass_os.py | souhaiboudiouf/Hands-On-AWS-Penetration-Testing-with-Kali-Linux | 65b987e741d08dc933ffec0590947d764f4f62b7 | [
"MIT"
] | 102 | 2019-02-15T13:38:13.000Z | 2022-01-17T11:39:34.000Z | Chapter16/bypass_os.py | souhaiboudiouf/Hands-On-AWS-Penetration-Testing-with-Kali-Linux | 65b987e741d08dc933ffec0590947d764f4f62b7 | [
"MIT"
] | null | null | null | Chapter16/bypass_os.py | souhaiboudiouf/Hands-On-AWS-Penetration-Testing-with-Kali-Linux | 65b987e741d08dc933ffec0590947d764f4f62b7 | [
"MIT"
] | 56 | 2019-05-21T22:31:52.000Z | 2022-03-19T14:16:41.000Z | import random
import boto3
import botocore
# A list of user agents that won't trigger GuardDuty
safe_user_agents = [
'Boto3/1.7.48 Python/3.7.0 Windows/10 Botocore/1.10.48',
'aws-sdk-go/1.4.22 (go1.7.4; linux; amd64)',
'aws-cli/1.15.10 Python/2.7.9 Windows/8 botocore/1.10.10'
]
# Grab the current user agent
user_agent = boto3.session.Session()._session.user_agent().lower()
# Check if we are on Kali, Parrot, or Pentoo Linux against a lowercase version of the user agent
if 'kali' in user_agent.lower() or 'parrot' in user_agent.lower() or 'pentoo' in user_agent.lower():
# Change the user agent to a random one from the list of safe user agents
user_agent = random.choice(safe_user_agents)
# Prepare a botocore config object with our user agent
botocore_config = botocore.config.Config(
user_agent=user_agent
)
# Create the boto3 client, using the botocore config we just set up
client = boto3.client(
'ec2',
region_name='us-east-1',
config=botocore_config
)
# Print out the results of our EC2 DescribeInstances call
print(client.describe_instances()) | 31.371429 | 100 | 0.734973 | import random
import boto3
import botocore
safe_user_agents = [
'Boto3/1.7.48 Python/3.7.0 Windows/10 Botocore/1.10.48',
'aws-sdk-go/1.4.22 (go1.7.4; linux; amd64)',
'aws-cli/1.15.10 Python/2.7.9 Windows/8 botocore/1.10.10'
]
# Grab the current user agent
user_agent = boto3.session.Session()._session.user_agent().lower()
# Check if we are on Kali, Parrot, or Pentoo Linux against a lowercase version of the user agent
if 'kali' in user_agent.lower() or 'parrot' in user_agent.lower() or 'pentoo' in user_agent.lower():
# Change the user agent to a random one from the list of safe user agents
user_agent = random.choice(safe_user_agents)
# Prepare a botocore config object with our user agent
botocore_config = botocore.config.Config(
user_agent=user_agent
)
# Create the boto3 client, using the botocore config we just set up
client = boto3.client(
'ec2',
region_name='us-east-1',
config=botocore_config
)
# Print out the results of our EC2 DescribeInstances call
print(client.describe_instances()) | true | true |
f7330f121e0d941a5ef4aff5f7eb6011732a74f8 | 4,077 | py | Python | Gds/src/fprime_gds/common/templates/event_template.py | SSteve/fprime | 12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8 | [
"Apache-2.0"
] | 4 | 2021-06-17T16:45:40.000Z | 2022-02-08T00:05:32.000Z | Gds/src/fprime_gds/common/templates/event_template.py | SSteve/fprime | 12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8 | [
"Apache-2.0"
] | 35 | 2021-06-30T20:40:48.000Z | 2022-03-01T04:51:42.000Z | Gds/src/fprime_gds/common/templates/event_template.py | SSteve/fprime | 12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8 | [
"Apache-2.0"
] | 13 | 2021-06-02T20:52:17.000Z | 2022-02-21T17:51:57.000Z | """
@brief Event Template class
Instances of this class describe a specific event type. For example: AF_ASSERT_0
or cmdSeq_CS_CmdStarted
@date Created July 2, 2018
@author R. Joseph Paetz
@bug No known bugs
"""
from fprime.common.models.serialize import type_base
from fprime.common.models.serialize.type_exceptions import TypeMismatchException
from fprime_gds.common.utils.event_severity import EventSeverity
from . import data_template
class EventTemplate(data_template.DataTemplate):
    """Describes a specific event type (e.g. AF_ASSERT_0 or
    cmdSeq_CS_CmdStarted): its ID, producing component, argument layout,
    severity, and display format string.
    """

    def __init__(
        self, event_id, name, component, args, severity, format_str, description=None
    ):
        """
        Constructor

        Args:
            event_id: Numeric ID of the event being described (int)
            name: Event name as a string
            component: Name of the component that produces the event
            args: List of arguments in tuple form. Each tuple should be:
                  (arg name, arg description, arg obj). Where arg obj is an
                  object of a type derived from the class Base Type. Arg
                  description may be None.
            severity: Event severity as an EventSeverity Enum
            format_str: Format string for the event's arguments
            description: (Optional) Event Description

        Raises:
            TypeMismatchException: If any argument has an unexpected type.
        """
        super().__init__()
        # Validate every input up front so a template is either fully valid
        # or never constructed.
        if not isinstance(event_id, int):
            raise TypeMismatchException(int, type(event_id))
        if not isinstance(name, str):
            raise TypeMismatchException(str, type(name))
        if not isinstance(component, str):
            raise TypeMismatchException(str, type(component))
        if not isinstance(format_str, str):
            raise TypeMismatchException(str, type(format_str))
        if not isinstance(args, list):
            raise TypeMismatchException(list, type(args))
        for (arg_name, arg_desc, arg_type) in args:
            if not isinstance(arg_name, str):
                raise TypeMismatchException(str, type(arg_name))
            if arg_desc is not None and not isinstance(arg_desc, str):
                raise TypeMismatchException(str, type(arg_desc))
            if not isinstance(arg_type, type_base.BaseType):
                raise TypeMismatchException(type_base.BaseType, type(arg_type))
        if description is not None and not isinstance(description, str):
            raise TypeMismatchException(str, type(description))
        if not isinstance(severity, EventSeverity):
            # Pass the expected type itself (not a string literal) for
            # consistency with every other check above.
            raise TypeMismatchException(EventSeverity, type(severity))
        # All inputs validated; store them.
        self.id = event_id
        self.name = name
        self.comp_name = component
        self.args = args
        self.severity = severity
        self.format_str = format_str
        self.description = description

    def get_full_name(self):
        """
        Get the full name of this event

        Returns:
            The full name (component.channel) for this event
        """
        return "{}.{}".format(self.comp_name, self.name)

    def get_id(self):
        """Return the numeric event ID."""
        return self.id

    def get_name(self):
        """Return the event name (without the component prefix)."""
        return self.name

    def get_comp_name(self):
        """Return the name of the component that produces this event."""
        return self.comp_name

    def get_severity(self):
        """
        Returns the event's severity as an EventSeverity Enum.

        Returns:
            The event's severity as an EventSeverity Enum
        """
        return self.severity

    def get_format_str(self):
        """Return the format string used to render the event's arguments."""
        return self.format_str

    def get_description(self):
        """Return the event description, or None if none was provided."""
        return self.description

    def get_args(self):
        """
        Returns a list of argument information

        Returns:
            A list of tuples where each tuple represents an argument. Each tuple
            in the form: (arg name, arg description, arg obj). Where arg obj is
            an object of a type derived from the class Base Type. Arg
            description may be None.
        """
        return self.args
# Module defines templates only; nothing to do when executed directly.
if __name__ == "__main__":
    pass
| 31.122137 | 85 | 0.638705 |
from fprime.common.models.serialize import type_base
from fprime.common.models.serialize.type_exceptions import TypeMismatchException
from fprime_gds.common.utils.event_severity import EventSeverity
from . import data_template
class EventTemplate(data_template.DataTemplate):
    """Template describing one specific F' event type (ID, names, args, severity)."""

    def __init__(
        self, event_id, name, component, args, severity, format_str, description=None
    ):
        """
        Validate and store the fields describing an event type.

        Args:
            event_id: numeric ID of the event being described
            name: event name as a string
            component: name of the component that produces the event
            args: list of (arg name, arg description, arg obj) tuples, where
                arg obj derives from BaseType and the description may be None
            severity: event severity as an EventSeverity enum member
            format_str: format string used to render the event's arguments
            description: (Optional) event description

        Raises:
            TypeMismatchException: if any field has an unexpected type.
        """
        super().__init__()
        # Type-check every field before storing anything.
        if not isinstance(event_id, int):
            raise TypeMismatchException(int, type(event_id))
        if not isinstance(name, str):
            raise TypeMismatchException(str, type(name))
        if not isinstance(component, str):
            raise TypeMismatchException(str, type(component))
        if not isinstance(format_str, str):
            raise TypeMismatchException(str, type(format_str))
        if not isinstance(args, list):
            raise TypeMismatchException(list, type(args))
        # Each argument tuple is (name, optional description, type object).
        for (arg_name, arg_desc, arg_type) in args:
            if not isinstance(arg_name, str):
                raise TypeMismatchException(str, type(arg_name))
            if arg_desc is not None and not isinstance(arg_desc, str):
                raise TypeMismatchException(str, type(arg_desc))
            if not isinstance(arg_type, type_base.BaseType):
                raise TypeMismatchException(type_base.BaseType, type(arg_type))
        if description is not None and not isinstance(description, str):
            raise TypeMismatchException(str, type(description))
        if not isinstance(severity, EventSeverity):
            raise TypeMismatchException("EventSeverity", type(severity))
        # Store the validated template fields.
        self.id = event_id
        self.name = name
        self.comp_name = component
        self.args = args
        self.severity = severity
        self.format_str = format_str
        self.description = description

    def get_full_name(self):
        """Return the fully qualified "component.name" string for this event."""
        return "{}.{}".format(self.comp_name, self.name)

    def get_id(self):
        """Return the numeric event ID."""
        return self.id

    def get_name(self):
        """Return the event name."""
        return self.name

    def get_comp_name(self):
        """Return the producing component's name."""
        return self.comp_name

    def get_severity(self):
        """Return the event's severity as an EventSeverity enum member."""
        return self.severity

    def get_format_str(self):
        """Return the format string for the event's arguments."""
        return self.format_str

    def get_description(self):
        """Return the optional event description (may be None)."""
        return self.description

    def get_args(self):
        """Return the list of (name, description, type obj) argument tuples."""
        return self.args
# Module defines templates only; nothing to do when executed directly.
if __name__ == "__main__":
    pass
| true | true |
f7330f24728dcfd149a3001a44d2ed61a9343a8e | 3,105 | py | Python | ddtrace/utils/deprecation.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/utils/deprecation.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:56:55.000Z | 2020-12-22T16:56:55.000Z | ddtrace/utils/deprecation.py | kenferrara/dd-trace-py | 12e52e0ab804061e72b0f76214f5e4bb475ae20f | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:54:02.000Z | 2020-12-22T16:54:02.000Z | import os
import warnings
from functools import wraps
from ddtrace.vendor import debtcollector
class RemovedInDDTrace10Warning(DeprecationWarning):
    """Warning category for ddtrace features slated for removal in 1.0."""

    pass
def format_message(name, message, version):
    """Build the standard deprecation-warning text for *name*.

    Produces strings of the form:
        'fn' is deprecated and will be remove in future versions (1.0). <message>
    where the parenthesized version is omitted when *version* is falsy.
    """
    version_suffix = " ({})".format(version) if version else ""
    return "'%s' is deprecated and will be remove in future versions%s. %s" % (
        name,
        version_suffix,
        message,
    )
def warn(message, stacklevel=2):
    """Emit *message* as a ``RemovedInDDTrace10Warning`` via the warnings module."""
    warnings.warn(message, category=RemovedInDDTrace10Warning, stacklevel=stacklevel)
def deprecation(name="", message="", version=None):
    """Report a ``DeprecationWarning`` for *name*.

    ``DeprecationWarning`` is ignored by default, so the warning only shows
    when the interpreter is launched with warnings enabled:

        $ python -Wall script.py

    This mirrors the approach used by most frameworks, including Django
    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
    """
    # stacklevel=4 points the warning at the caller of the deprecated API.
    warn(format_message(name, message, version), stacklevel=4)
def deprecated(message="", version=None):
    """Decorator that reports a ``DeprecationWarning`` when the wrapped
    function is called.

    ``DeprecationWarning`` is ignored by default; run the interpreter with
    warnings enabled to see it:

        $ python -Wall script.py

    This mirrors the approach used by most frameworks, including Django
    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Build the message lazily, at call time, so decoration is free.
            warn(format_message(func.__name__, message, version), stacklevel=3)
            return func(*args, **kwargs)

        return wrapper

    return decorator
def get_service_legacy(default=None):
    """Read the legacy ``{DD,DATADOG}_SERVICE_NAME`` environment variables.

    Emits a deprecation notice when either legacy variable is set and returns
    its value. Intended only for migrating integrations to ``DD_SERVICE``;
    when neither variable is set, no warning is produced and *default* is
    returned.
    """
    for legacy_key in ("DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"):
        if legacy_key not in os.environ:
            continue
        debtcollector.deprecate(
            (
                "'{}' is deprecated and will be removed in a future version. Please use DD_SERVICE instead. "
                "Refer to our release notes on Github: https://github.com/DataDog/dd-trace-py/releases/tag/v0.36.0 "
                "for the improvements being made for service names."
            ).format(legacy_key)
        )
        return os.getenv(legacy_key)
    return default
| 34.5 | 120 | 0.677617 | import os
import warnings
from functools import wraps
from ddtrace.vendor import debtcollector
class RemovedInDDTrace10Warning(DeprecationWarning):
    """Warning category for ddtrace features slated for removal in 1.0."""

    pass
def format_message(name, message, version):
    """Build the standard "'<name>' is deprecated..." warning text.

    The parenthesized version is omitted when *version* is falsy.
    """
    return "'{}' is deprecated and will be remove in future versions{}. {}".format(
        name,
        " ({})".format(version) if version else "",
        message,
    )
def warn(message, stacklevel=2):
    """Emit *message* as a ``RemovedInDDTrace10Warning``."""
    warnings.warn(message, RemovedInDDTrace10Warning, stacklevel=stacklevel)
def deprecation(name="", message="", version=None):
    """Report a ``DeprecationWarning`` for *name* (hidden unless warnings enabled)."""
    msg = format_message(name, message, version)
    # stacklevel=4 points the warning at the caller of the deprecated API.
    warn(msg, stacklevel=4)
def deprecated(message="", version=None):
    """Decorator that emits a ``DeprecationWarning`` each time the wrapped
    function is called."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Message is built at call time, not at decoration time.
            msg = format_message(func.__name__, message, version)
            warn(msg, stacklevel=3)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def get_service_legacy(default=None):
    """Read the legacy ``{DD,DATADOG}_SERVICE_NAME`` env vars, warning if set.

    Returns the first legacy variable found, or *default* (with no warning)
    when neither is set.
    """
    for old_env_key in ["DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"]:
        if old_env_key in os.environ:
            debtcollector.deprecate(
                (
                    "'{}' is deprecated and will be removed in a future version. Please use DD_SERVICE instead. "
                    "Refer to our release notes on Github: https://github.com/DataDog/dd-trace-py/releases/tag/v0.36.0 "
                    "for the improvements being made for service names."
                ).format(old_env_key)
            )
            return os.getenv(old_env_key)
    return default
| true | true |
f733105d65957142954e8587e025220e714ea3f8 | 21,697 | py | Python | nlplingo/nn/extractor.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/nn/extractor.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | null | null | null | nlplingo/nn/extractor.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z | import codecs
import json
import os
import numpy as np
from nlplingo.nn.sequence_model import SequenceXLMRBase, SequenceXLMRCustom
from nlplingo.nn.spanpair_model import SpanPairModelEmbedded
from nlplingo.tasks.entitycoref.feature import EntityCorefFeatureGenerator
from nlplingo.tasks.entitycoref.generator import EntityCorefExampleGenerator
from nlplingo.tasks.eventcoref.feature import EventCorefFeatureGenerator
from nlplingo.tasks.eventcoref.generator import EventCorefExampleGenerator
from nlplingo.tasks.eventpair.feature import EventPairFeatureGenerator
from nlplingo.tasks.eventpair.generator import EventPairExampleGenerator
from nlplingo.tasks.eventframe.feature import EventFramePairFeatureGenerator
from nlplingo.tasks.eventframe.generator import EventFramePairExampleGenerator
from keras.models import load_model as keras_load_model
from keras.models import Model as KerasModel
from nlplingo.tasks.eventargument.feature import EventArgumentFeatureGenerator
from nlplingo.tasks.eventargument.generator import EventArgumentExampleGenerator
from nlplingo.tasks.eventrelation.feature import EventEventRelationFeatureGenerator
from nlplingo.tasks.eventrelation.generator import EventEventRelationExampleGenerator
from nlplingo.tasks.entityrelation.feature import EntityRelationFeatureGenerator
from nlplingo.tasks.entityrelation.generator import EntityRelationExampleGenerator
from nlplingo.tasks.event_domain import EventDomain
from nlplingo.tasks.eventtrigger.feature import EventTriggerFeatureGenerator
from nlplingo.tasks.eventtrigger.generator import EventTriggerExampleGenerator
from nlplingo.nn.argument_model import CNNArgumentModel
from nlplingo.nn.argument_model import GNNArgumentModel
from nlplingo.nn.argument_model import MultiLayerArgumentModelEmbedded, WithinSentenceArgumentModel
from nlplingo.nn.extraction_model import ExtractionModel
from nlplingo.nn.keras_models.common import keras_custom_objects
from nlplingo.nn.trigger_model import CNNTriggerModel
from nlplingo.nn.trigger_model import MultiLayerTriggerModelEmbedded
from nlplingo.nn.eventpair_model import EventPairModelEmbeddedTrigger
from nlplingo.nn.event_event_relation_model import MultiLayerEventEventRelationModel, WithinSentenceEER, EventEventRelationStackedOpenNREModel, WithinSentenceEERGeneral
from nlplingo.nn.entity_entity_relation_model import MultiLayerEntityRelationModelEmbedded, WithinSentenceEntityRelationModel
from nlplingo.nn.eventframepair_model import EventFramePairModelEmbedded
from nlplingo.nn.hyperparameters import HyperParameters
from nlplingo.tasks.common.feature.feature_setting import FeatureSetting
import logging
from nlplingo.tasks.sequence.feature import SequenceFeatureGenerator
from nlplingo.tasks.sequence.generator import SequenceExampleGenerator
logger = logging.getLogger(__name__)
class Extractor(object):
    """Bundles a trained NLPLingo model with its feature/example generators.

    The ``model_type`` string from the extractor parameters selects both the
    network class (via the ``*_model_table`` lookups below) and, by prefix,
    the matching feature-generator / example-generator pair.
    """

    # model_type -> network class lookup tables, one per task family.
    trigger_model_table = {
        'event-trigger_cnn': CNNTriggerModel,
        'event-trigger_cnn-embedded': CNNTriggerModel,
        'event-trigger_multilayer-embedded': MultiLayerTriggerModelEmbedded,
        'event-trigger_sentence-embedded': MultiLayerTriggerModelEmbedded,
    }
    argument_model_table = {
        'event-argument_cnn': CNNArgumentModel,
        'event-argument_cnn-embedded': CNNArgumentModel,
        'event-argument_gnn': GNNArgumentModel,
        'event-argument_multilayer-embedded': MultiLayerArgumentModelEmbedded,
        'event-argument_bert-mention': WithinSentenceArgumentModel
    }
    eer_model_table = {
        'event-event-relation_multilayer': MultiLayerEventEventRelationModel,
        'event-event-relation_multilayer-embedded': MultiLayerEventEventRelationModel,
        'event-event-relation_two_models_with_postprocessing': EventEventRelationStackedOpenNREModel,
        'event-event-relation_cnn-embedded': WithinSentenceEER,  # This exists for legacy reasons
        'event-event-relation_within-sentence': WithinSentenceEER,
        'event-event-relation_general_decode-embedded': WithinSentenceEERGeneral
    }
    entity_relation_model_table = {
        'entity-entity-relation_multilayer-embedded': MultiLayerEntityRelationModelEmbedded,
        'entity-entity-relation_bert-mention': WithinSentenceEntityRelationModel
    }
    eventpair_model_table = {
        'event-pair_embedded': SpanPairModelEmbedded,
        'event-pair_embedded_trigger': EventPairModelEmbeddedTrigger
    }
    eventframepair_model_table = {
        'event-framepair_embedded': EventFramePairModelEmbedded
    }
    entitycoref_model_table = {
        'entitycoref_embedded': SpanPairModelEmbedded
    }
    eventcoref_model_table = {
        'eventcoref_embedded': SpanPairModelEmbedded
    }
    sequence_model_table = {
        'sequence_xlmr-base': SequenceXLMRBase,
        'sequence_xlmr-custom': SequenceXLMRCustom
    }

    def __init__(self, params, extractor_params, embeddings, load_from_file=False):
        """
        Build the model, feature generator and example generator selected by
        ``extractor_params['model_type']``.

        :type params: dict  # general parameters
        :type extractor_params: dict  # specific to this extractor
        :type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]
        :param load_from_file: when True, load previously trained weights
        """
        self.extractor_params = extractor_params
        self.extractor_name = extractor_params.get('extractor_name', None)
        self.task = extractor_params.get('task', None)
        self.engine = extractor_params.get('engine', None)
        self.model_type = extractor_params['model_type']
        """:type: str"""
        self.domain = EventDomain.read_domain_ontology_file(extractor_params['domain_ontology'],
                                                            domain_name=extractor_params.get('domain_name', 'general'))
        """:type: nlplingo.tasks.event_domain.EventDomain"""
        self.domain.build_prior(extractor_params.get('ontology_yaml'))
        self.model_file = extractor_params['model_file']
        """:type: str"""
        self.class_thresholds_path = extractor_params.get('class_thresholds')
        """:type: str"""
        # Negative sentinel means "no global override configured".
        self.class_thresholds_global = float(
            extractor_params.get('class_thresholds_global', -1.0))
        """:type: float"""
        self.use_trigger_safelist = extractor_params.get('trigger.use_safelist', False)
        # Hyper-parameters are built the same way for a missing engine, 'keras'
        # and 'pytorch'; anything else is rejected.
        if 'engine' not in extractor_params or (('engine' in extractor_params) and (extractor_params['engine'] == 'keras')):
            self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)
        elif extractor_params['engine'] == 'pytorch':
            self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)
        # elif extractor_params['engine'] == 'transformers':
        #     pass
        else:
            raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))
        """:type: nlplingo.nn.extractor.HyperParameters"""
        self.feature_setting = FeatureSetting(self.extractor_params['features'])

        # Instantiate the network class matching model_type. Each table maps
        # model_type strings for one task family to its model class.
        self.extraction_model = None
        if self.model_type in self.trigger_model_table:
            self.extraction_model = self.trigger_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                              self.hyper_parameters,
                                                                              self.feature_setting)
        elif self.model_type in self.argument_model_table:
            self.extraction_model = self.argument_model_table[self.model_type](params, extractor_params, self.domain,
                                                                               embeddings, self.hyper_parameters,
                                                                               self.feature_setting)
        elif self.model_type in self.eventpair_model_table:
            self.extraction_model = self.eventpair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                                self.hyper_parameters,
                                                                                self.feature_setting)  # TODO: fix this model
        elif self.model_type in self.eer_model_table:
            self.extraction_model = self.eer_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                          self.hyper_parameters,
                                                                          self.feature_setting)
        elif self.model_type in self.entity_relation_model_table:
            self.extraction_model = self.entity_relation_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                                      self.hyper_parameters,
                                                                                      self.feature_setting)
        elif self.model_type in self.eventframepair_model_table:
            self.extraction_model = self.eventframepair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                                     self.hyper_parameters,
                                                                                     self.feature_setting)  # TODO: fix this model
        elif self.model_type in self.entitycoref_model_table:
            self.extraction_model = self.entitycoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                                  self.hyper_parameters,
                                                                                  self.feature_setting)
        elif self.model_type in self.eventcoref_model_table:
            self.extraction_model = self.eventcoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                                 self.hyper_parameters,
                                                                                 self.feature_setting)
        elif self.model_type in self.sequence_model_table:
            # Sequence models need the domain's label set converted to
            # BIO-style sequence types for the relevant task first.
            if self.task == 'event-trigger':
                self.domain.create_sequence_types(self.domain.event_types)
            elif self.task == 'event-argument':
                self.domain.create_sequence_types(self.domain.event_roles)
            elif self.task == 'ner':
                self.domain.create_sequence_types(self.domain.entity_types)
            self.extraction_model = self.sequence_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
                                                                               self.hyper_parameters, self.feature_setting)
        elif self.model_type.startswith('oregon'):  # TODO hack, until YS has time to properly integrate after BETTER eval
            pass
        else:
            raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))
        """:type: nlplingo.nn.event_model.ExtractionModel"""

        # TODO: extend this to support EventEventRelation models
        if load_from_file:
            logging.info('Loading previously trained model')
            if extractor_params.get('engine', None) == 'keras':
                self.load_keras()
            # NOTE(review): when engine == 'keras' the chain below skips both
            # the `is None` branch and the 'pytorch' branch and falls into the
            # `else`, raising after load_keras() already ran — looks
            # unintended; confirm against upstream.
            if extractor_params.get('engine', None) is None:  # TODO use framework
                self.load_keras()
            elif extractor_params['engine'] == 'pytorch':
                pass
            # elif extractor_params['engine'] == 'transformers':
            #     pass
            else:
                raise Exception(
                    'Only Keras or PyTorch engines are supported.')

        #if ('engine' in extractor_params) and (extractor_params['engine'] == 'pytorch'):
        #    if load_from_file or self.extraction_model.hyper_params.load:
        #        pass
        """
        self.extraction_model.hyper_params.num_class = self.extraction_model.num_output
        if self.extraction_model.word_embeddings is not None:
            trainer = self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,
                feature_names=self.extraction_model.features.feature_strings, emb_matrix=self.extraction_model.word_embeddings)
        else: # frozen, external embedding case
            if self.extraction_model.embeddings_vector_size is not None:
                self.extraction_model.hyper_params.dict['emb_dim'] = self.extraction_model.embeddings_vector_size
            trainer = self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,
                feature_names=self.extraction_model.features.feature_strings)
        if self.model_file:
            trainer.load(self.model_file)
        self.extraction_model.trained_model = trainer
        """

        self.feature_generator = None  # feature generator
        self.example_generator = None  # example generator

        # Select the feature/example generator pair by model_type prefix.
        # TODO this should really be renamed as task instead of model_type
        if self.model_type.startswith('event-trigger_'):
            self.feature_generator = EventTriggerFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.domain)
            self.example_generator = EventTriggerExampleGenerator(self.domain, params, extractor_params,
                                                                  self.hyper_parameters)
        elif self.model_type.startswith('event-argument_'):
            self.feature_generator = EventArgumentFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventArgumentExampleGenerator(self.domain, params, extractor_params,
                                                                   self.hyper_parameters)
        elif self.model_type.startswith('event-pair_'):
            self.feature_generator = EventPairFeatureGenerator(extractor_params)
            self.example_generator = EventPairExampleGenerator(self.domain, params, extractor_params,
                                                               self.hyper_parameters)
        elif self.model_type.startswith('event-event-relation_'):
            self.feature_generator = EventEventRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventEventRelationExampleGenerator(self.domain, params, extractor_params,
                                                                        self.hyper_parameters)
        elif self.model_type.startswith('entity-entity-relation_'):
            self.feature_generator = EntityRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EntityRelationExampleGenerator(self.domain, params, extractor_params,
                                                                    self.hyper_parameters)
        elif self.model_type.startswith('event-framepair_'):
            self.feature_generator = EventFramePairFeatureGenerator(extractor_params)
            self.example_generator = EventFramePairExampleGenerator(self.domain, params, extractor_params,
                                                                    self.hyper_parameters)
        elif self.model_type.startswith('entitycoref_'):
            self.feature_generator = EntityCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EntityCorefExampleGenerator(self.domain, params, extractor_params,
                                                                 self.hyper_parameters)
        elif self.model_type.startswith('eventcoref_'):
            self.feature_generator = EventCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventCorefExampleGenerator(self.domain, params, extractor_params,
                                                                self.hyper_parameters)
        elif self.model_type.startswith('oregon'):  # TODO hack, until YS has time to properly integrate after BETTER eval
            pass
        elif self.model_type.startswith('sequence_'):
            self.feature_generator = SequenceFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.extraction_model.tokenizer, self.domain)
            self.example_generator = SequenceExampleGenerator(self.domain, params, extractor_params,
                                                              self.hyper_parameters)
        else:
            raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))

        self.extraction_model_last_layer = None
        """:type: keras.models.Model"""
        self.emit_vectors = extractor_params.get('output_vectors', False)

        self.class_thresholds = None
        # load saved thresholds from file
        self._build_threshold_vector()
        # use a global threshold value if they were not loaded
        if self.class_thresholds is None:
            logging.info('Using global threshold override for {}'.format(
                self.extractor_name))
            # use defaults, if no global override given in extractor parameters
            if self.class_thresholds_global < 0.0:
                logging.info('Using default thresholds for {}'.format(
                    self.extractor_name))
                self.class_thresholds_global = 0.5
            number_of_classes = len(self.domain.event_types.keys())
            logging.info('- global threshold ={}'.format(self.class_thresholds_global))
            self.class_thresholds = np.asarray(
                [self.class_thresholds_global] * number_of_classes)

    def _build_threshold_vector(self):
        """Load per-class decision thresholds from ``class_thresholds_path``.

        Supports two formats: an ``.npz`` whose ``thresholds`` array is used
        as-is, or a ``.json`` mapping event-type labels to threshold values
        (unknown labels are reported and skipped; unlisted classes keep 0.5).
        Leaves ``self.class_thresholds`` as None when no file is configured.
        """
        path = self.class_thresholds_path
        if path is not None and os.path.isfile(str(path)):
            if path.endswith('.npz'):
                self.class_thresholds = np.load(str(path))['thresholds']
                print('Loaded saved thresholds from NPZ for {}'.format(
                    self.extractor_name))
            elif path.endswith('.json'):
                # Start from the 0.5 default for every class, then overwrite
                # the classes named in the JSON file.
                number_of_classes = len(self.domain.event_types.keys())
                self.class_thresholds = np.asarray([0.5] * number_of_classes)
                with codecs.open(path, 'r', encoding='utf8') as fh:
                    thresholds_json = json.load(fh)
                for label, threshold in thresholds_json.items():
                    try:
                        index = self.domain.get_event_type_index(label)
                        self.class_thresholds[index] = float(threshold)
                    except ValueError as e:
                        print('The following error occurred while loading '
                              'thresholds from json and will be ignored:\n'
                              '{}'.format(e))
                print('Loaded saved thresholds from JSON for {}'.format(
                    self.extractor_name))

    def make_last_layer_model(self):
        """Build a Keras model that outputs the penultimate layer's activations.

        The copy shares the trained model's input and is cached in
        ``self.extraction_model_last_layer``; subsequent calls are no-ops.
        Used by :meth:`get_embeddings` to emit example vectors.
        """
        if self.extraction_model_last_layer is not None:
            print("Last layer of model has already been built")
            return
        keras_model = self.extraction_model
        # extraction_model may be an nlplingo wrapper; unwrap to the raw
        # Keras model it holds.
        if type(keras_model) is not KerasModel:
            keras_model = keras_model.model
        """:type: keras.models.Model"""
        print("Original model:")
        try:
            print(keras_model.summary())
        except TypeError:
            print("Keras encountered an error when trying to print the model "
                  "summary. Skipping this printout...")
        # New model: same inputs, but output taken one layer before the top.
        self.extraction_model_last_layer = KerasModel(
            inputs=keras_model.input,
            outputs=keras_model.layers[-2].output)
        print("Copy model:")
        try:
            print(self.extraction_model_last_layer.summary())
        except TypeError:
            print("Keras encountered an error when trying to print the copy's "
                  "summary. Skipping this printout...")

    def get_embeddings(self, examples, data_list):
        """Return one penultimate-layer vector per example.

        Runs the last-layer model (building it on first use) over
        ``data_list`` and returns row ``i`` of the prediction matrix for the
        ``i``-th entry of ``examples``, as a list of vectors.
        """
        ret = []
        self.make_last_layer_model()
        vectors = self.extraction_model_last_layer.predict(data_list)
        for i, ex in enumerate(examples):
            output_vector = vectors[i, :]
            ret.append(output_vector)
        return ret

    def load_keras(self):
        """Load trained Keras weights from ``self.model_file`` into
        ``self.extraction_model``.

        Falls back to loading with the project's custom objects when the
        plain load raises ``ValueError`` (custom layers in the saved model).
        """
        try:
            trained_keras_model = keras_load_model(self.model_file)
        except ValueError:
            custom_objects = keras_custom_objects
            trained_keras_model = keras_load_model(self.model_file, custom_objects)
        weights = trained_keras_model.get_weights()
        # Drop saved weight matrices that match the pretrained-embedding row
        # count; those rows are supplied by the (frozen) embedding instead.
        new_weights = []
        for i, w in enumerate(weights):
            pretrained = self.extraction_model.layers.pretrained_embeddings
            using_pretrained = pretrained is not None
            if using_pretrained and i > 1 and w.shape[0] == pretrained.shape[0]:
                # TODO retrain models to avoid this hack
                pass
            else:
                new_weights.append(w)
        weights = new_weights
        # for i, w in enumerate(weights):
        #     print(i, w.shape
        self.extraction_model.model.set_weights(weights)
| 55.633333 | 175 | 0.646357 | import codecs
import json
import os
import numpy as np
from nlplingo.nn.sequence_model import SequenceXLMRBase, SequenceXLMRCustom
from nlplingo.nn.spanpair_model import SpanPairModelEmbedded
from nlplingo.tasks.entitycoref.feature import EntityCorefFeatureGenerator
from nlplingo.tasks.entitycoref.generator import EntityCorefExampleGenerator
from nlplingo.tasks.eventcoref.feature import EventCorefFeatureGenerator
from nlplingo.tasks.eventcoref.generator import EventCorefExampleGenerator
from nlplingo.tasks.eventpair.feature import EventPairFeatureGenerator
from nlplingo.tasks.eventpair.generator import EventPairExampleGenerator
from nlplingo.tasks.eventframe.feature import EventFramePairFeatureGenerator
from nlplingo.tasks.eventframe.generator import EventFramePairExampleGenerator
from keras.models import load_model as keras_load_model
from keras.models import Model as KerasModel
from nlplingo.tasks.eventargument.feature import EventArgumentFeatureGenerator
from nlplingo.tasks.eventargument.generator import EventArgumentExampleGenerator
from nlplingo.tasks.eventrelation.feature import EventEventRelationFeatureGenerator
from nlplingo.tasks.eventrelation.generator import EventEventRelationExampleGenerator
from nlplingo.tasks.entityrelation.feature import EntityRelationFeatureGenerator
from nlplingo.tasks.entityrelation.generator import EntityRelationExampleGenerator
from nlplingo.tasks.event_domain import EventDomain
from nlplingo.tasks.eventtrigger.feature import EventTriggerFeatureGenerator
from nlplingo.tasks.eventtrigger.generator import EventTriggerExampleGenerator
from nlplingo.nn.argument_model import CNNArgumentModel
from nlplingo.nn.argument_model import GNNArgumentModel
from nlplingo.nn.argument_model import MultiLayerArgumentModelEmbedded, WithinSentenceArgumentModel
from nlplingo.nn.extraction_model import ExtractionModel
from nlplingo.nn.keras_models.common import keras_custom_objects
from nlplingo.nn.trigger_model import CNNTriggerModel
from nlplingo.nn.trigger_model import MultiLayerTriggerModelEmbedded
from nlplingo.nn.eventpair_model import EventPairModelEmbeddedTrigger
from nlplingo.nn.event_event_relation_model import MultiLayerEventEventRelationModel, WithinSentenceEER, EventEventRelationStackedOpenNREModel, WithinSentenceEERGeneral
from nlplingo.nn.entity_entity_relation_model import MultiLayerEntityRelationModelEmbedded, WithinSentenceEntityRelationModel
from nlplingo.nn.eventframepair_model import EventFramePairModelEmbedded
from nlplingo.nn.hyperparameters import HyperParameters
from nlplingo.tasks.common.feature.feature_setting import FeatureSetting
import logging
from nlplingo.tasks.sequence.feature import SequenceFeatureGenerator
from nlplingo.tasks.sequence.generator import SequenceExampleGenerator
logger = logging.getLogger(__name__)
class Extractor(object):
    """Bundle one trained nlplingo model with everything needed to run it.

    An extractor couples:
      * a model class, chosen from the ``*_model_table`` registries below via
        the ``model_type`` string in ``extractor_params``,
      * the :class:`EventDomain` (ontology) the model predicts over,
      * hyper-parameters and feature settings,
      * the feature/example generators that turn documents into model inputs,
      * optional per-class decision thresholds (from .npz/.json, or a global
        fallback).
    """

    # Registries mapping the config's `model_type` string to the implementing
    # model class, one registry per task family.
    trigger_model_table = {
        'event-trigger_cnn': CNNTriggerModel,
        'event-trigger_cnn-embedded': CNNTriggerModel,
        'event-trigger_multilayer-embedded': MultiLayerTriggerModelEmbedded,
        'event-trigger_sentence-embedded': MultiLayerTriggerModelEmbedded,
    }
    argument_model_table = {
        'event-argument_cnn': CNNArgumentModel,
        'event-argument_cnn-embedded': CNNArgumentModel,
        'event-argument_gnn': GNNArgumentModel,
        'event-argument_multilayer-embedded': MultiLayerArgumentModelEmbedded,
        'event-argument_bert-mention': WithinSentenceArgumentModel
    }
    eer_model_table = {
        'event-event-relation_multilayer': MultiLayerEventEventRelationModel,
        'event-event-relation_multilayer-embedded': MultiLayerEventEventRelationModel,
        'event-event-relation_two_models_with_postprocessing': EventEventRelationStackedOpenNREModel,
        'event-event-relation_cnn-embedded': WithinSentenceEER,
        'event-event-relation_within-sentence': WithinSentenceEER,
        'event-event-relation_general_decode-embedded': WithinSentenceEERGeneral
    }
    entity_relation_model_table = {
        'entity-entity-relation_multilayer-embedded': MultiLayerEntityRelationModelEmbedded,
        'entity-entity-relation_bert-mention': WithinSentenceEntityRelationModel
    }
    eventpair_model_table = {
        'event-pair_embedded': SpanPairModelEmbedded,
        'event-pair_embedded_trigger': EventPairModelEmbeddedTrigger
    }
    eventframepair_model_table = {
        'event-framepair_embedded': EventFramePairModelEmbedded
    }
    entitycoref_model_table = {
        'entitycoref_embedded': SpanPairModelEmbedded
    }
    eventcoref_model_table = {
        'eventcoref_embedded': SpanPairModelEmbedded
    }
    sequence_model_table = {
        'sequence_xlmr-base': SequenceXLMRBase,
        'sequence_xlmr-custom': SequenceXLMRCustom
    }

    def __init__(self, params, extractor_params, embeddings, load_from_file=False):
        """
        :param params: whole-experiment parameter dict
        :param extractor_params: this extractor's sub-dict of the config
        :param embeddings: pre-loaded word embeddings (may be None when the
            model consumes frozen/external embeddings)
        :param load_from_file: when True, restore previously trained weights
        """
        self.extractor_params = extractor_params
        self.extractor_name = extractor_params.get('extractor_name', None)
        self.task = extractor_params.get('task', None)
        self.engine = extractor_params.get('engine', None)
        self.model_type = extractor_params['model_type']

        self.domain = EventDomain.read_domain_ontology_file(
            extractor_params['domain_ontology'],
            domain_name=extractor_params.get('domain_name', 'general'))
        self.domain.build_prior(extractor_params.get('ontology_yaml'))

        self.model_file = extractor_params['model_file']
        self.class_thresholds_path = extractor_params.get('class_thresholds')
        self.class_thresholds_global = float(
            extractor_params.get('class_thresholds_global', -1.0))
        self.use_trigger_safelist = extractor_params.get('trigger.use_safelist', False)

        # A missing 'engine' key historically means keras; both supported
        # engines share the same hyper-parameter container.
        if 'engine' not in extractor_params or extractor_params['engine'] in ('keras', 'pytorch'):
            self.hyper_parameters = HyperParameters(
                extractor_params['hyper-parameters'], load_from_file)
        else:
            # NOTE: this previously raised with a message about model_type,
            # which was misleading -- the unsupported value is the engine.
            raise RuntimeError('Extractor engine: {} not implemented.'.format(
                extractor_params['engine']))

        self.feature_setting = FeatureSetting(self.extractor_params['features'])
        self.extraction_model = self._build_extraction_model(
            params, extractor_params, embeddings)

        if load_from_file:
            logger.info('Loading previously trained model')
            engine = extractor_params.get('engine', None)
            # BUGFIX: this used to be two sibling `if` statements, so an
            # explicit engine of 'keras' loaded its weights and then fell
            # through to the trailing `else` and raised.  Each engine is now
            # handled exactly once.
            if engine is None or engine == 'keras':
                self.load_keras()
            elif engine == 'pytorch':
                pass  # pytorch models restore their weights in their trainers
            else:
                raise Exception(
                    'Only Keras or PyTorch engines are supported.')

        self.feature_generator = None
        self.example_generator = None
        self._build_generators(params, extractor_params)

        self.extraction_model_last_layer = None
        self.emit_vectors = extractor_params.get('output_vectors', False)

        self.class_thresholds = None
        self._build_threshold_vector()
        if self.class_thresholds is None:
            # No per-class threshold file: fall back to one global threshold
            # for every class (default 0.5 when the config gave none).
            logger.info('Using global threshold override for {}'.format(
                self.extractor_name))
            if self.class_thresholds_global < 0.0:
                logger.info('Using default thresholds for {}'.format(
                    self.extractor_name))
                self.class_thresholds_global = 0.5
            number_of_classes = len(self.domain.event_types.keys())
            logger.info('- global threshold ={}'.format(self.class_thresholds_global))
            self.class_thresholds = np.asarray(
                [self.class_thresholds_global] * number_of_classes)

    def _build_extraction_model(self, params, extractor_params, embeddings):
        """Instantiate the model class registered for ``self.model_type``.

        Every registry entry takes the same constructor signature, so the
        per-table if/elif chain collapses into one merged lookup.  Returns
        None for 'oregon*' model types, which are handled externally.
        """
        if self.model_type in self.sequence_model_table:
            # Sequence taggers need sequence label types derived from the
            # task's ontology before the model is constructed.
            if self.task == 'event-trigger':
                self.domain.create_sequence_types(self.domain.event_types)
            elif self.task == 'event-argument':
                self.domain.create_sequence_types(self.domain.event_roles)
            elif self.task == 'ner':
                self.domain.create_sequence_types(self.domain.entity_types)
        registry = {}
        for table in (self.trigger_model_table, self.argument_model_table,
                      self.eer_model_table, self.entity_relation_model_table,
                      self.eventpair_model_table, self.eventframepair_model_table,
                      self.entitycoref_model_table, self.eventcoref_model_table,
                      self.sequence_model_table):
            registry.update(table)
        if self.model_type in registry:
            return registry[self.model_type](params, extractor_params, self.domain,
                                             embeddings, self.hyper_parameters,
                                             self.feature_setting)
        if self.model_type.startswith('oregon'):
            return None
        raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))

    def _build_generators(self, params, extractor_params):
        """Create the feature/example generators matching ``self.model_type``'s
        task prefix and store them on ``self``."""
        if self.model_type.startswith('event-trigger_'):
            # Trigger features additionally need the domain.
            self.feature_generator = EventTriggerFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting, self.domain)
            self.example_generator = EventTriggerExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('event-argument_'):
            self.feature_generator = EventArgumentFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventArgumentExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('event-pair_'):
            self.feature_generator = EventPairFeatureGenerator(extractor_params)
            self.example_generator = EventPairExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('event-event-relation_'):
            self.feature_generator = EventEventRelationFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventEventRelationExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('entity-entity-relation_'):
            self.feature_generator = EntityRelationFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EntityRelationExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('event-framepair_'):
            self.feature_generator = EventFramePairFeatureGenerator(extractor_params)
            self.example_generator = EventFramePairExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('entitycoref_'):
            self.feature_generator = EntityCorefFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EntityCorefExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('eventcoref_'):
            self.feature_generator = EventCorefFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting)
            self.example_generator = EventCorefExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        elif self.model_type.startswith('oregon'):
            pass  # handled by an external pipeline
        elif self.model_type.startswith('sequence_'):
            # Sequence features need the model's tokenizer, so the model must
            # already have been built at this point.
            self.feature_generator = SequenceFeatureGenerator(
                extractor_params, self.hyper_parameters, self.feature_setting,
                self.extraction_model.tokenizer, self.domain)
            self.example_generator = SequenceExampleGenerator(
                self.domain, params, extractor_params, self.hyper_parameters)
        else:
            raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))

    def _build_threshold_vector(self):
        """Populate ``self.class_thresholds`` from the configured file.

        Supports .npz (array under key 'thresholds') and .json (label ->
        threshold mapping).  Leaves ``self.class_thresholds`` as None when no
        readable file is configured, in which case __init__ falls back to a
        global threshold.
        """
        path = self.class_thresholds_path
        if path is None or not os.path.isfile(str(path)):
            return
        if path.endswith('.npz'):
            self.class_thresholds = np.load(str(path))['thresholds']
            print('Loaded saved thresholds from NPZ for {}'.format(
                self.extractor_name))
        elif path.endswith('.json'):
            # Start every class at the 0.5 default, then override per label.
            number_of_classes = len(self.domain.event_types.keys())
            self.class_thresholds = np.asarray([0.5] * number_of_classes)
            with codecs.open(path, 'r', encoding='utf8') as fh:
                thresholds_json = json.load(fh)
            for label, threshold in thresholds_json.items():
                try:
                    index = self.domain.get_event_type_index(label)
                    self.class_thresholds[index] = float(threshold)
                except ValueError as e:
                    # Unknown labels in the file are reported but ignored.
                    print('The following error occurred while loading '
                          'thresholds from json and will be ignored:\n'
                          '{}'.format(e))
            print('Loaded saved thresholds from JSON for {}'.format(
                self.extractor_name))

    def make_last_layer_model(self):
        """Build a sibling Keras model that outputs the second-to-last layer.

        Exposes the final hidden representation (for embedding extraction)
        instead of the classification scores.  Idempotent.
        """
        if self.extraction_model_last_layer is not None:
            print("Last layer of model has already been built")
            return
        keras_model = self.extraction_model
        if type(keras_model) is not KerasModel:
            # nlplingo wrapper objects keep the real Keras model in `.model`.
            keras_model = keras_model.model
        print("Original model:")
        try:
            print(keras_model.summary())
        except TypeError:
            print("Keras encountered an error when trying to print the model "
                  "summary. Skipping this printout...")
        self.extraction_model_last_layer = KerasModel(
            inputs=keras_model.input,
            outputs=keras_model.layers[-2].output)
        print("Copy model:")
        try:
            print(self.extraction_model_last_layer.summary())
        except TypeError:
            print("Keras encountered an error when trying to print the copy's "
                  "summary. Skipping this printout...")

    def get_embeddings(self, examples, data_list):
        """Return the last-hidden-layer vector for each example.

        :param examples: the examples corresponding row-for-row to data_list
        :param data_list: model-ready input arrays
        :return: list of 1-D vectors, one per example
        """
        self.make_last_layer_model()
        vectors = self.extraction_model_last_layer.predict(data_list)
        return [vectors[i, :] for i in range(len(examples))]

    def load_keras(self):
        """Load trained Keras weights from ``self.model_file`` into the model."""
        try:
            trained_keras_model = keras_load_model(self.model_file)
        except ValueError:
            # Models containing custom layers need the custom-object registry.
            trained_keras_model = keras_load_model(self.model_file, keras_custom_objects)
        weights = trained_keras_model.get_weights()
        # When pretrained (frozen) embeddings are in use, the saved file may
        # contain an extra copy of the embedding matrix; drop those arrays so
        # the remaining weights line up with the freshly built model.
        # TODO retrain models to avoid this hack
        pretrained = self.extraction_model.layers.pretrained_embeddings
        new_weights = []
        for i, w in enumerate(weights):
            if pretrained is not None and i > 1 and w.shape[0] == pretrained.shape[0]:
                continue
            new_weights.append(w)
        self.extraction_model.model.set_weights(new_weights)
| true | true |
f73310aa2b49cd42868188d5dd473dea96e80215 | 45 | py | Python | mmdet2trt/models/backbones/__init__.py | jackweiwang/mmdetection-to-tensorrt | f988ba8e923764fb1173385a1c7160b8f8b5bd99 | [
"Apache-2.0"
] | 1 | 2021-08-23T10:09:37.000Z | 2021-08-23T10:09:37.000Z | mmdet2trt/models/backbones/__init__.py | gcong18/mmdetection-to-tensorrt | c31c32ee4720ff56010bcda77bacf3a110d0526c | [
"Apache-2.0"
] | null | null | null | mmdet2trt/models/backbones/__init__.py | gcong18/mmdetection-to-tensorrt | c31c32ee4720ff56010bcda77bacf3a110d0526c | [
"Apache-2.0"
] | null | null | null | from .base_backbone import BaseBackboneWraper | 45 | 45 | 0.911111 | from .base_backbone import BaseBackboneWraper | true | true |
f73310bc504ae60753d4112724e86a8d78e11598 | 7,189 | py | Python | tests/components/bond/test_fan.py | dakesson/core | 8cfffd00d607e75595e208fbe46794084e6c8707 | [
"Apache-2.0"
] | 1 | 2016-08-21T21:32:20.000Z | 2016-08-21T21:32:20.000Z | tests/components/bond/test_fan.py | dakesson/core | 8cfffd00d607e75595e208fbe46794084e6c8707 | [
"Apache-2.0"
] | 39 | 2020-08-03T07:33:25.000Z | 2022-03-31T06:02:02.000Z | tests/components/bond/test_fan.py | mKeRix/home-assistant | 9a81fe507db4a2b7f164d0683ca55c4e3dc6ccda | [
"Apache-2.0"
] | null | null | null | """Tests for the Bond fan device."""
from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import patch_bond_action, patch_bond_device_state, setup_platform
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
    """Create a ceiling fan with given name."""
    actions = ["SetSpeed", "SetDirection"]
    return {"name": name, "type": DeviceType.CEILING_FAN, "actions": actions}
async def turn_fan_on(
    hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
    """Turn the fan on, at the given speed when one is provided."""
    payload = {ATTR_ENTITY_ID: fan_id}
    if speed:
        payload[fan.ATTR_SPEED] = speed
    await hass.services.async_call(
        FAN_DOMAIN, SERVICE_TURN_ON, service_data=payload, blocking=True
    )
    await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
    """Tests that the devices are registered in the entity registry."""
    await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
    registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
    assert list(registry.entities) == ["fan.name_1"]
async def test_non_standard_speed_list(hass: core.HomeAssistant):
    """Tests that the device gets a custom speed list when it supports more than 3 speeds."""
    await setup_platform(
        hass,
        FAN_DOMAIN,
        ceiling_fan("name-1"),
        bond_device_id="test-device-id",
        props={"max_speed": 6},
    )

    actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
    assert actual_speeds == [
        fan.SPEED_OFF,
        fan.SPEED_LOW,
        fan.SPEED_MEDIUM,
        fan.SPEED_HIGH,
    ]

    # Each Home Assistant speed name must map onto the right Bond speed index.
    speed_to_bond_index = [
        (fan.SPEED_LOW, 1),
        (fan.SPEED_MEDIUM, 3),
        (fan.SPEED_HIGH, 6),
    ]
    with patch_bond_device_state():
        for ha_speed, bond_index in speed_to_bond_index:
            with patch_bond_action() as mock_set_speed:
                await turn_fan_on(hass, "fan.name_1", ha_speed)
            mock_set_speed.assert_called_once_with(
                "test-device-id", Action.set_speed(bond_index)
            )
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
    """Tests that turn on command delegates to set speed API."""
    await setup_platform(
        hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_set_speed, patch_bond_device_state():
        await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)

        expected = Action.set_speed(1)
        mock_set_speed.assert_called_with("test-device-id", expected)
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
    """Tests that turn on command delegates to turn on API."""
    await setup_platform(
        hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_turn_on, patch_bond_device_state():
        await turn_fan_on(hass, "fan.name_1")

        expected = Action.turn_on()
        mock_turn_on.assert_called_with("test-device-id", expected)
async def test_turn_off_fan(hass: core.HomeAssistant):
    """Tests that turn off command delegates to API."""
    await setup_platform(
        hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_turn_off, patch_bond_device_state():
        service_data = {ATTR_ENTITY_ID: "fan.name_1"}
        await hass.services.async_call(
            FAN_DOMAIN, SERVICE_TURN_OFF, service_data, blocking=True
        )
        await hass.async_block_till_done()

        mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
    """Tests that update command sets correct state when Bond API reports fan power is on."""
    await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))

    device_state = {"power": 1, "speed": 1}
    with patch_bond_device_state(return_value=device_state):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
    """Tests that update command sets correct state when Bond API reports fan power is off."""
    await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))

    device_state = {"power": 0, "speed": 1}
    with patch_bond_device_state(return_value=device_state):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
    """Tests that update command sets correct direction when Bond API reports fan direction is forward."""
    await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))

    with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    attributes = hass.states.get("fan.name_1").attributes
    assert attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
    """Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
    await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))

    with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()

    attributes = hass.states.get("fan.name_1").attributes
    assert attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
    """Tests that set direction command delegates to API."""
    await setup_platform(
        hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
    )

    with patch_bond_action() as mock_set_direction, patch_bond_device_state():
        service_data = {ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD}
        await hass.services.async_call(
            FAN_DOMAIN, SERVICE_SET_DIRECTION, service_data, blocking=True
        )
        await hass.async_block_till_done()

        mock_set_direction.assert_called_once_with(
            "test-device-id", Action.set_direction(Direction.FORWARD)
        )
| 36.866667 | 114 | 0.716372 | from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import patch_bond_action, patch_bond_device_state, setup_platform
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def turn_fan_on(
hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
service_data = {ATTR_ENTITY_ID: fan_id}
if speed:
service_data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_ON, service_data=service_data, blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
assert [key for key in registry.entities] == ["fan.name_1"]
async def test_non_standard_speed_list(hass: core.HomeAssistant):
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
assert actual_speeds == [
fan.SPEED_OFF,
fan.SPEED_LOW,
fan.SPEED_MEDIUM,
fan.SPEED_HIGH,
]
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(1)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_MEDIUM)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(3)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_HIGH)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_off_fan(hass: core.HomeAssistant):
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "fan.name_1"}, blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_direction, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD},
blocking=True,
)
await hass.async_block_till_done()
mock_set_direction.assert_called_once_with(
"test-device-id", Action.set_direction(Direction.FORWARD)
)
| true | true |
f733114f2e58f62143485597d342dc7a64037cbd | 1,889 | py | Python | one.py | jacobbieker/NUR_Handin2 | 6e620b23191edaec4452d29eac90ec37ced0c038 | [
"MIT"
] | null | null | null | one.py | jacobbieker/NUR_Handin2 | 6e620b23191edaec4452d29eac90ec37ced0c038 | [
"MIT"
] | null | null | null | one.py | jacobbieker/NUR_Handin2 | 6e620b23191edaec4452d29eac90ec37ced0c038 | [
"MIT"
] | 1 | 2019-05-17T07:33:07.000Z | 2019-05-17T07:33:07.000Z | import numpy as np
import matplotlib.pyplot as plt
from one_a import one_a
from one_b import one_b
from one_c import one_c
from one_d import one_d
from one_e import one_e
def random_generator(seed, m=2 ** 64 - 1, a=2349543, c=913842, a1=21, a2=35, a3=4, a4=4294957665):
    """
    Generate pseudorandom floats in [0, 1] by combining an (M)LCG, a 64-bit
    XOR-shift, and a multiply-with-carry (MWC) step.

    :param seed: starting seed
    :param m: modulus of the (M)LCG; determines its period
    :param a: multiplier of the (M)LCG
    :param c: increment of the (M)LCG
    :param a1: first (right) XOR-shift amount
    :param a2: second (left) XOR-shift amount
    :param a3: third (right) XOR-shift amount
    :param a4: MWC multiplier
    :return: infinite generator of floats in [0, 1]
    """
    # Mask used to keep the XOR-shift results within 64 bits.
    mask64 = 0xFFFFFFFFFFFFFFFF
    while True:
        # (M)LCG step.
        state = (a * seed + c) % m
        # Three 64-bit XOR-shift steps.
        state ^= (state >> a1) & mask64
        state ^= (state << a2) & mask64
        state ^= (state >> a3) & mask64
        # MWC step: low 32 bits times a4, plus the high 32 bits as carry.
        mixed = a4 * (state & 0xFFFFFFFF) + (state >> 32)
        # The raw MWC value seeds the next iteration.
        seed = mixed
        value = mixed / m
        if value > 1.:
            # Fold back into [0, 1) when the ratio exceeds 1.
            yield value % 1.
        else:
            yield value
def all_one(rand_gen):
    """Run every part of exercise one in order, clearing the axes after each."""
    for part in (one_a, one_b, one_c, one_d, one_e):
        part(rand_gen)
        plt.cla()
| 30.467742 | 98 | 0.640551 | import numpy as np
import matplotlib.pyplot as plt
from one_a import one_a
from one_b import one_b
from one_c import one_c
from one_d import one_d
from one_e import one_e
def random_generator(seed, m=2 ** 64 - 1, a=2349543, c=913842, a1=21, a2=35, a3=4, a4=4294957665):
bit_64 = 0xffffffffffffffff
while True:
generated_number = (a * seed + c) % m
generated_number = generated_number ^ (generated_number >> a1) & bit_64
generated_number = generated_number ^ (generated_number << a2) & bit_64
generated_number = generated_number ^ (generated_number >> a3) & bit_64
mwc_out = a4 * (generated_number & (2 ** 32 - 1)) + (generated_number >> 32)
seed = mwc_out
mwc_out = mwc_out / m
if mwc_out > 1.:
close_to_final = mwc_out % 1.
else:
close_to_final = mwc_out
yield close_to_final
def all_one(rand_gen):
one_a(rand_gen)
plt.cla()
one_b(rand_gen)
plt.cla()
one_c(rand_gen)
plt.cla()
one_d(rand_gen)
plt.cla()
one_e(rand_gen)
plt.cla()
| true | true |
f73315aa71396ff61edb7deaaef7841c99c6b86e | 364 | py | Python | solution/practice/algorithms/strings/hackerrank-in-a-string/solution.py | satyam857/HackerRank | b6c0c199a5e320b1b59fdedafda2630258314b76 | [
"MIT"
] | 158 | 2017-03-17T19:43:14.000Z | 2022-03-23T04:10:33.000Z | solution/practice/algorithms/strings/hackerrank-in-a-string/solution.py | skatsd/HackerRank | 74918aa46144150f4da0986c27d6762afe7c4ef3 | [
"MIT"
] | 2 | 2018-06-13T02:41:19.000Z | 2020-06-03T19:43:58.000Z | solution/practice/algorithms/strings/hackerrank-in-a-string/solution.py | skatsd/HackerRank | 74918aa46144150f4da0986c27d6762afe7c4ef3 | [
"MIT"
] | 67 | 2017-08-03T02:52:24.000Z | 2022-03-25T03:22:01.000Z | # This challenge just wants us to determine if a string contains a
# given subsequence. We do this by iterating through the string and
# test whether the characters from the subsequence appear one by one.
# Subsequence check: scan each string once, advancing a cursor into
# "hackerrank" whenever the next required character is found.
for _ in range(int(input())):
    line = input()
    matched = 0
    for ch in line:
        if matched < 10 and ch == 'hackerrank'[matched]:
            matched += 1
    print('YES' if matched == 10 else 'NO')
| 30.333333 | 69 | 0.662088 |
for case in range(int(input())):
s = input()
i = 0
for c in s:
if i < 10 and c == 'hackerrank'[i]:
i += 1
print('YES' if i == 10 else 'NO')
| true | true |
f733165323bb66266cb28f9208819bcb4c35f98f | 420 | py | Python | mp_google/_setup.py | moonstock/mp_google | 21e610491fcad5a4e003cfe1f6726ae893ce89f4 | [
"MIT"
] | null | null | null | mp_google/_setup.py | moonstock/mp_google | 21e610491fcad5a4e003cfe1f6726ae893ce89f4 | [
"MIT"
] | null | null | null | mp_google/_setup.py | moonstock/mp_google | 21e610491fcad5a4e003cfe1f6726ae893ce89f4 | [
"MIT"
] | null | null | null | import glob
import yaml
import json
def to_json(path):
    """Convert a YAML file to a JSON file with the same stem.

    :param path: path to a ``.yml``/``.yaml`` file; output is written next to
        it with the extension replaced by ``.json``.
    """
    stem = path.rsplit(".", 1)[0]
    # Context managers close the handles deterministically; the original
    # passed bare open() results to yaml.load/json.dump and leaked them.
    with open(path, "r", encoding="UTF-8") as src:
        data = yaml.load(src, Loader=yaml.FullLoader)
    print(f"stem: {stem}.json")
    with open(f"{stem}.json", "w", encoding="utf-8") as dst:
        json.dump(data, dst, ensure_ascii=False, indent=4)
for path in [f.replace("\\", "/") for f in glob.glob("./configs/*.yml")]:
to_json(path) | 28 | 94 | 0.616667 | import glob
import yaml
import json
def to_json(path):
stem = path.rsplit(".", 1)[0]
data = yaml.load(open(path, "r", encoding="UTF-8"), Loader=yaml.FullLoader)
print(f"stem: {stem}.json")
json.dump(data, open(f"{stem}.json", "w", encoding="utf-8"), ensure_ascii=False, indent=4)
for path in [f.replace("\\", "/") for f in glob.glob("./configs/*.yml")]:
to_json(path) | true | true |
f7331699d16cc32769f8d737d2a508e299277049 | 700 | py | Python | others/maddpg/utils/noise.py | manish-pra/trcopo | df8730f07ef554970c7a0aa653cc42d4886948ec | [
"MIT"
] | 5 | 2020-06-22T17:13:51.000Z | 2021-11-02T14:19:58.000Z | others/maddpg/utils/noise.py | manish-pra/trcopo | df8730f07ef554970c7a0aa653cc42d4886948ec | [
"MIT"
] | null | null | null | others/maddpg/utils/noise.py | manish-pra/trcopo | df8730f07ef554970c7a0aa653cc42d4886948ec | [
"MIT"
] | 1 | 2020-09-14T06:41:03.000Z | 2020-09-14T06:41:03.000Z | import numpy as np
# from https://github.com/songrotek/DDPG/blob/master/ou_noise.py
class OUNoise:
    """Ornstein-Uhlenbeck process noise for exploration in continuous action spaces.

    The state drifts toward the mean ``mu`` at rate ``theta`` with Gaussian
    perturbations scaled by ``sigma``; ``noise()`` returns the state scaled by
    ``scale``.  Based on https://github.com/songrotek/DDPG/blob/master/ou_noise.py
    """

    def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigma=1):
        """
        :param action_dimension: length of the noise vector
        :param scale: multiplier applied to the state returned by noise()
        :param mu: long-run mean of the process
        :param theta: mean-reversion rate
        :param sigma: standard deviation of the Gaussian perturbation
        """
        self.action_dimension = action_dimension
        self.scale = scale
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        # reset() initializes self.state to the mean; the original also
        # assigned self.state directly here, duplicating that work.
        self.reset()

    def reset(self):
        """Reset the process state to the mean ``mu``."""
        self.state = np.ones(self.action_dimension) * self.mu

    def noise(self):
        """Advance the process one step and return the scaled state."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
        return self.state * self.scale
| 30.434783 | 89 | 0.615714 | import numpy as np
class OUNoise:
def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigma=1):
self.action_dimension = action_dimension
self.scale = scale
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dimension) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.action_dimension) * self.mu
def noise(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state * self.scale
| true | true |
f7331854f3dfbfd98cf8caeb459471b1519084a0 | 137 | py | Python | dash/__main__.py | thepearson/dash-cli | 706c0671e6dcff6c440eb030a8d8b35b9d62b907 | [
"MIT"
] | 7 | 2018-03-19T10:08:16.000Z | 2018-07-25T01:22:45.000Z | dash/__main__.py | thepearson/dash-cli | 706c0671e6dcff6c440eb030a8d8b35b9d62b907 | [
"MIT"
] | 1 | 2018-10-24T06:07:15.000Z | 2018-10-24T06:07:15.000Z | dash/__main__.py | thepearson/dash-cli | 706c0671e6dcff6c440eb030a8d8b35b9d62b907 | [
"MIT"
] | 2 | 2018-07-19T01:46:19.000Z | 2018-08-20T22:55:57.000Z | # -*- coding: utf-8 -*-
"""dash.cli.__main__: executed when bootstrap directory is called as script."""
from .cli import main
main()
| 15.222222 | 79 | 0.671533 |
from .cli import main
main()
| true | true |
f7331a32a725e59a59a5bb4c7a897b4dc351bfb3 | 424 | py | Python | packages/python/plotly/plotly/validators/heatmapgl/hoverlabel/_alignsrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/heatmapgl/hoverlabel/_alignsrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/heatmapgl/hoverlabel/_alignsrc.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="alignsrc", parent_name="heatmapgl.hoverlabel", **kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| 30.285714 | 82 | 0.658019 | import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="alignsrc", parent_name="heatmapgl.hoverlabel", **kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| true | true |
f7331b05e413882e1b54fe0fb81c1630497f0cc4 | 562 | py | Python | scalica/web/scalica/micro/urls.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
] | null | null | null | scalica/web/scalica/micro/urls.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
] | 2 | 2017-12-03T23:04:13.000Z | 2017-12-03T23:49:25.000Z | scalica/web/scalica/micro/urls.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^home/$', views.home, name='home'),
url(r'^stream/(?P<user_id>[0-9]+)/$', views.stream, name='stream'),
url(r'^post/$', views.post, name='post'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^register/$', views.register, name='register'),
url(r'^recommend/$', views.recommend, name='recommend'),
url('^', include('django.contrib.auth.urls'))
]
| 35.125 | 71 | 0.635231 | from django.conf.urls import include, url
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^home/$', views.home, name='home'),
url(r'^stream/(?P<user_id>[0-9]+)/$', views.stream, name='stream'),
url(r'^post/$', views.post, name='post'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^register/$', views.register, name='register'),
url(r'^recommend/$', views.recommend, name='recommend'),
url('^', include('django.contrib.auth.urls'))
]
| true | true |
f7331b74808c449c76dc0e45eaf36d36615704d5 | 13,969 | py | Python | tensorflow_estimator/python/estimator/tpu/tpu_estimator_signals_test.py | cyc/estimator | 742a07296c8f584150bb02f97be7207130ded5fd | [
"Apache-2.0"
] | 2 | 2019-05-13T16:37:32.000Z | 2019-09-17T22:01:38.000Z | tensorflow_estimator/python/estimator/tpu/tpu_estimator_signals_test.py | cyc/estimator | 742a07296c8f584150bb02f97be7207130ded5fd | [
"Apache-2.0"
] | 1 | 2020-01-31T21:04:31.000Z | 2020-01-31T21:04:31.000Z | tensorflow_estimator/python/estimator/tpu/tpu_estimator_signals_test.py | cyc/estimator | 742a07296c8f584150bb02f97be7207130ded5fd | [
"Apache-2.0"
] | 2 | 2019-09-17T22:02:05.000Z | 2020-10-22T03:56:07.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Estimator Signalling Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow_estimator.python.estimator.tpu import tpu_estimator
def make_input_fn(num_samples):
a = np.linspace(0, 100.0, num=num_samples)
b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))
def input_fn(params):
batch_size = params['batch_size']
da1 = dataset_ops.Dataset.from_tensor_slices(a)
da2 = dataset_ops.Dataset.from_tensor_slices(b)
dataset = dataset_ops.Dataset.zip((da1, da2))
dataset = dataset.map(lambda fa, fb: {'a': fa, 'b': fb})
dataset = dataset.batch(batch_size)
return dataset
return input_fn, (a, b)
def make_input_fn_with_labels(num_samples):
a = np.linspace(0, 100.0, num=num_samples)
b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))
def input_fn(params):
batch_size = params['batch_size']
da1 = dataset_ops.Dataset.from_tensor_slices(a)
da2 = dataset_ops.Dataset.from_tensor_slices(b)
dataset = dataset_ops.Dataset.zip((da1, da2))
dataset = dataset.map(lambda fa, fb: ({'a': fa}, fb))
dataset = dataset.batch(batch_size)
return dataset
return input_fn, (a, b)
class TPUEstimatorStoppingSignalsTest(test.TestCase):
def test_normal_output_without_signals(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
features = dataset_ops.make_one_shot_iterator(dataset).get_next()
# With tf.data.Dataset.batch, the batch is None, i.e., dynamic shape.
self.assertIsNone(features['a'].shape.as_list()[0])
with session.Session() as sess:
result = sess.run(features)
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
# This run should work as num_samples / batch_size = 2.
result = sess.run(features)
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
with self.assertRaises(errors.OutOfRangeError):
# Given num_samples and batch_size, this run should fail.
sess.run(features)
def test_output_with_stopping_signals(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
# With tf.data.Dataset.batch, the batch is None, i.e., dynamic shape.
self.assertIsNone(features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# This run should work as num_samples / batch_size = 2.
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# This run should work, *but* see STOP ('1') as signals
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
class TPUEstimatorStoppingSignalsWithPaddingTest(test.TestCase):
def test_num_samples_divisible_by_batch_size(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
# With padding, all shapes are static now.
self.assertEqual(batch_size, features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
# This run should work as num_samples / batch_size = 2.
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
# This run should work, *but* see STOP ('1') as signals
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
def test_num_samples_not_divisible_by_batch_size(self):
num_samples = 5
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn_with_labels(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, labels = inputs.features_and_labels()
signals = inputs.signals()
# With padding, all shapes are static.
self.assertEqual(batch_size, features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
self.assertAllEqual(a[:batch_size], evaluated_features['a'])
self.assertAllEqual(b[:batch_size], evaluated_labels)
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
# This run should work as num_samples / batch_size >= 2.
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
self.assertAllEqual(a[batch_size:2*batch_size], evaluated_features['a'])
self.assertAllEqual(b[batch_size:2*batch_size], evaluated_labels)
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
# This is the final partial batch.
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
real_batch_size = num_samples % batch_size
# Assert the real part.
self.assertAllEqual(a[2*batch_size:num_samples],
evaluated_features['a'][:real_batch_size])
self.assertAllEqual(b[2*batch_size:num_samples],
evaluated_labels[:real_batch_size])
# Assert the padded part.
self.assertAllEqual([0.0] * (batch_size - real_batch_size),
evaluated_features['a'][real_batch_size:])
self.assertAllEqual([[0.0]] * (batch_size - real_batch_size),
evaluated_labels[real_batch_size:])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
padding = ([.0] * real_batch_size
+ [1.] * (batch_size - real_batch_size))
self.assertAllEqual(padding, evaluated_signals['padding_mask'])
# This run should work, *but* see STOP ('1') as signals
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
def test_slice(self):
num_samples = 3
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
sliced_features = (
tpu_estimator._PaddingSignals.slice_tensor_or_dict(
features, signals))
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# This is the final partial batch.
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertEqual(1, len(result['a']))
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# This run should work, *but* see STOP ('1') as signals
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(sliced_features)
def test_slice_with_multi_invocations_per_step(self):
num_samples = 3
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(
dataset, batch_size, add_padding=True, num_invocations_per_step=2)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
sliced_features = (
tpu_estimator._PaddingSignals.slice_tensor_or_dict(features, signals))
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# This is the final partial batch.
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertEqual(1, len(result['a']))
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
# We should see 3 continuous batches with STOP ('1') as signals and all
# of them have mask 1.
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(sliced_features)
if __name__ == '__main__':
test.main()
| 41.085294 | 80 | 0.662467 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow_estimator.python.estimator.tpu import tpu_estimator
def make_input_fn(num_samples):
a = np.linspace(0, 100.0, num=num_samples)
b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))
def input_fn(params):
batch_size = params['batch_size']
da1 = dataset_ops.Dataset.from_tensor_slices(a)
da2 = dataset_ops.Dataset.from_tensor_slices(b)
dataset = dataset_ops.Dataset.zip((da1, da2))
dataset = dataset.map(lambda fa, fb: {'a': fa, 'b': fb})
dataset = dataset.batch(batch_size)
return dataset
return input_fn, (a, b)
def make_input_fn_with_labels(num_samples):
a = np.linspace(0, 100.0, num=num_samples)
b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))
def input_fn(params):
batch_size = params['batch_size']
da1 = dataset_ops.Dataset.from_tensor_slices(a)
da2 = dataset_ops.Dataset.from_tensor_slices(b)
dataset = dataset_ops.Dataset.zip((da1, da2))
dataset = dataset.map(lambda fa, fb: ({'a': fa}, fb))
dataset = dataset.batch(batch_size)
return dataset
return input_fn, (a, b)
class TPUEstimatorStoppingSignalsTest(test.TestCase):
def test_normal_output_without_signals(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
features = dataset_ops.make_one_shot_iterator(dataset).get_next()
self.assertIsNone(features['a'].shape.as_list()[0])
with session.Session() as sess:
result = sess.run(features)
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
result = sess.run(features)
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
def test_output_with_stopping_signals(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
self.assertIsNone(features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
class TPUEstimatorStoppingSignalsWithPaddingTest(test.TestCase):
def test_num_samples_divisible_by_batch_size(self):
num_samples = 4
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
self.assertEqual(batch_size, features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
result, evaluated_signals = sess.run([features, signals])
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
def test_num_samples_not_divisible_by_batch_size(self):
num_samples = 5
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn_with_labels(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, labels = inputs.features_and_labels()
signals = inputs.signals()
self.assertEqual(batch_size, features['a'].shape.as_list()[0])
with session.Session() as sess:
sess.run(dataset_initializer)
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
self.assertAllEqual(a[:batch_size], evaluated_features['a'])
self.assertAllEqual(b[:batch_size], evaluated_labels)
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
self.assertAllEqual(a[batch_size:2*batch_size], evaluated_features['a'])
self.assertAllEqual(b[batch_size:2*batch_size], evaluated_labels)
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([0.] * batch_size,
evaluated_signals['padding_mask'])
evaluated_features, evaluated_labels, evaluated_signals = (
sess.run([features, labels, signals]))
real_batch_size = num_samples % batch_size
self.assertAllEqual(a[2*batch_size:num_samples],
evaluated_features['a'][:real_batch_size])
self.assertAllEqual(b[2*batch_size:num_samples],
evaluated_labels[:real_batch_size])
self.assertAllEqual([0.0] * (batch_size - real_batch_size),
evaluated_features['a'][real_batch_size:])
self.assertAllEqual([[0.0]] * (batch_size - real_batch_size),
evaluated_labels[real_batch_size:])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
padding = ([.0] * real_batch_size
+ [1.] * (batch_size - real_batch_size))
self.assertAllEqual(padding, evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(features)
def test_slice(self):
num_samples = 3
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,
add_padding=True)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
sliced_features = (
tpu_estimator._PaddingSignals.slice_tensor_or_dict(
features, signals))
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertEqual(1, len(result['a']))
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(sliced_features)
def test_slice_with_multi_invocations_per_step(self):
num_samples = 3
batch_size = 2
params = {'batch_size': batch_size}
input_fn, (a, b) = make_input_fn(num_samples=num_samples)
with ops.Graph().as_default():
dataset = input_fn(params)
inputs = tpu_estimator._InputsWithStoppingSignals(
dataset, batch_size, add_padding=True, num_invocations_per_step=2)
dataset_initializer = inputs.dataset_initializer()
features, _ = inputs.features_and_labels()
signals = inputs.signals()
sliced_features = (
tpu_estimator._PaddingSignals.slice_tensor_or_dict(features, signals))
with session.Session() as sess:
sess.run(dataset_initializer)
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual(a[:batch_size], result['a'])
self.assertAllEqual(b[:batch_size], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
result, evaluated_signals = sess.run([sliced_features, signals])
self.assertEqual(1, len(result['a']))
self.assertAllEqual(a[batch_size:num_samples], result['a'])
self.assertAllEqual(b[batch_size:num_samples], result['b'])
self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
_, evaluated_signals = sess.run([sliced_features, signals])
self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])
self.assertAllEqual([1.] * batch_size,
evaluated_signals['padding_mask'])
with self.assertRaises(errors.OutOfRangeError):
sess.run(sliced_features)
if __name__ == '__main__':
test.main()
| true | true |
f7331bba94c19b8a27742a366d84b1bebf5a1400 | 428 | py | Python | src/utils/color.py | nixiesquid/mf-dakoker | 106a05d3cc55ca18c2f0d3111ff6c7110af368b9 | [
"MIT"
] | null | null | null | src/utils/color.py | nixiesquid/mf-dakoker | 106a05d3cc55ca18c2f0d3111ff6c7110af368b9 | [
"MIT"
] | 1 | 2020-06-25T18:19:36.000Z | 2020-06-25T18:19:36.000Z | src/utils/color.py | nixiesquid/mf-dakoker | 106a05d3cc55ca18c2f0d3111ff6c7110af368b9 | [
"MIT"
] | 1 | 2021-06-08T07:48:09.000Z | 2021-06-08T07:48:09.000Z | # coding:utf-8
class Color(object):
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
BOLD = "\033[1m"
END = "\033[0m"
@classmethod
def get_colored(cls, color, text):
return color + text + cls.END
@classmethod
def print(cls, color, text):
print(cls.get_colored(color, text))
| 19.454545 | 43 | 0.549065 |
class Color(object):
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
BOLD = "\033[1m"
END = "\033[0m"
@classmethod
def get_colored(cls, color, text):
return color + text + cls.END
@classmethod
def print(cls, color, text):
print(cls.get_colored(color, text))
| true | true |
f7331bd9f0a0ac254108834149813e5d1164e927 | 1,739 | py | Python | server/sqlmap/tamper/between.py | kurpav/volcano | 31d5f8f6f5a282abbea3861368eb39cfe33bba77 | [
"MIT"
] | null | null | null | server/sqlmap/tamper/between.py | kurpav/volcano | 31d5f8f6f5a282abbea3861368eb39cfe33bba77 | [
"MIT"
] | null | null | null | server/sqlmap/tamper/between.py | kurpav/volcano | 31d5f8f6f5a282abbea3861368eb39cfe33bba77 | [
"MIT"
] | 1 | 2018-07-04T18:35:16.000Z | 2018-07-04T18:35:16.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces greater than operator ('>') with 'NOT BETWEEN 0 AND #'
Replaces equals operator ('=') with 'BETWEEN # AND #'
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the greater than character
* The BETWEEN clause is SQL standard. Hence, this tamper script
should work against all (?) databases
>>> tamper('1 AND A > B--')
'1 AND A NOT BETWEEN 0 AND B--'
>>> tamper('1 AND A = B--')
'1 AND A BETWEEN B AND B--'
"""
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^>]+?)\s*>\s*([^>]+)\s*\Z", payload)
if match:
_ = "%s %s NOT BETWEEN 0 AND %s" % (match.group(2), match.group(4), match.group(5))
retVal = retVal.replace(match.group(0), _)
else:
retVal = re.sub(r"\s*>\s*(\d+|'[^']+'|\w+\(\d+\))", " NOT BETWEEN 0 AND \g<1>", payload)
if retVal == payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^=]+?)\s*=\s*(\w+)\s*", payload)
if match:
_ = "%s %s BETWEEN %s AND %s" % (match.group(2), match.group(4), match.group(5), match.group(5))
retVal = retVal.replace(match.group(0), _)
return retVal
| 28.983333 | 112 | 0.547441 |
import re
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^>]+?)\s*>\s*([^>]+)\s*\Z", payload)
if match:
_ = "%s %s NOT BETWEEN 0 AND %s" % (match.group(2), match.group(4), match.group(5))
retVal = retVal.replace(match.group(0), _)
else:
retVal = re.sub(r"\s*>\s*(\d+|'[^']+'|\w+\(\d+\))", " NOT BETWEEN 0 AND \g<1>", payload)
if retVal == payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)(?!.*\b(AND|OR)\b)([^=]+?)\s*=\s*(\w+)\s*", payload)
if match:
_ = "%s %s BETWEEN %s AND %s" % (match.group(2), match.group(4), match.group(5), match.group(5))
retVal = retVal.replace(match.group(0), _)
return retVal
| true | true |
f7331c1e0b73761c6966f8baf01e754a417fc2a3 | 1,528 | py | Python | src/asyncioffmpeg/ffplay.py | scivision/asyncio-subprocess-ffmpeg | 915a9c3a24e011059cdd38ae74bee31135f0a34f | [
"MIT"
] | 3 | 2019-11-11T14:59:54.000Z | 2020-09-11T15:05:53.000Z | src/asyncioffmpeg/ffplay.py | scivision/asyncio-subprocess-ffmpeg | 915a9c3a24e011059cdd38ae74bee31135f0a34f | [
"MIT"
] | null | null | null | src/asyncioffmpeg/ffplay.py | scivision/asyncio-subprocess-ffmpeg | 915a9c3a24e011059cdd38ae74bee31135f0a34f | [
"MIT"
] | null | null | null | """
This example uses a finite number of workers, rather than slamming the system with endless subprocesses.
This is more effective than endless context switching for an overloaded CPU.
"""
import asyncio
from pathlib import Path
import shutil
import sys
from typing import Iterable
import os
FFPLAY = shutil.which("ffplay")
if not FFPLAY:
raise ImportError("FFPLAY not found")
async def ffplay(queue: asyncio.Queue):
"""
Play media asynchronously.
Each task runs endlessly until .cancel()
"""
assert isinstance(FFPLAY, str)
while True:
filein = await queue.get()
cmd = [FFPLAY, "-loglevel", "warning", "-autoexit", str(filein)]
proc = await asyncio.create_subprocess_exec(*cmd)
ret = await proc.wait()
if ret != 0:
print(filein, "playback failure", file=sys.stderr)
queue.task_done()
async def main(flist: Iterable[Path]):
Ntask = os.cpu_count() # includes logical cores
if not isinstance(Ntask, int):
Ntask = 2
# %% setup queue
queue = asyncio.Queue() # type: ignore
for f in flist:
await queue.put(f)
# %% setup Tasks
if sys.version_info >= (3, 7):
tasks = [asyncio.create_task(ffplay(queue)) for i in range(Ntask)]
else:
tasks = [asyncio.ensure_future(ffplay(queue)) for i in range(Ntask)]
await queue.join()
# %% program done, teardown Tasks
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
| 24.253968 | 104 | 0.651178 |
import asyncio
from pathlib import Path
import shutil
import sys
from typing import Iterable
import os
FFPLAY = shutil.which("ffplay")
if not FFPLAY:
raise ImportError("FFPLAY not found")
async def ffplay(queue: asyncio.Queue):
assert isinstance(FFPLAY, str)
while True:
filein = await queue.get()
cmd = [FFPLAY, "-loglevel", "warning", "-autoexit", str(filein)]
proc = await asyncio.create_subprocess_exec(*cmd)
ret = await proc.wait()
if ret != 0:
print(filein, "playback failure", file=sys.stderr)
queue.task_done()
async def main(flist: Iterable[Path]):
Ntask = os.cpu_count()
if not isinstance(Ntask, int):
Ntask = 2
queue = asyncio.Queue()
for f in flist:
await queue.put(f)
if sys.version_info >= (3, 7):
tasks = [asyncio.create_task(ffplay(queue)) for i in range(Ntask)]
else:
tasks = [asyncio.ensure_future(ffplay(queue)) for i in range(Ntask)]
await queue.join()
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
| true | true |
f7331cdfb63678d19d68f9fda8a16e100ca99eba | 4,621 | py | Python | test/dll_path.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 50 | 2019-08-30T13:20:19.000Z | 2022-02-12T16:25:38.000Z | test/dll_path.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 5 | 2019-09-02T17:42:07.000Z | 2020-07-17T09:30:47.000Z | test/dll_path.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 8 | 2015-11-03T14:12:19.000Z | 2020-09-22T19:20:54.000Z | #!/usr/bin/python
# Copyright (C) 2003. Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the <dll-path> property is correctly set when using
# <hardcode-dll-paths>true.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# The point of this test is to have exe "main" which uses library "b", which
# uses library "a". When "main" is built with <hardcode-dll-paths>true, paths
# to both libraries should be present as values of <dll-path> feature. We
# create a special target type which reports <dll-path> values on its sources
# and compare the list of found values with out expectations.
t.write("jamroot.jam", "using dll_paths ;")
t.write("jamfile.jam", """\
exe main : main.cpp b//b ;
explicit main ;
path-list mp : main ;
""")
t.write("main.cpp", "int main() {}\n")
t.write("dll_paths.jam", """\
import "class" : new ;
import feature ;
import generators ;
import print ;
import sequence ;
import type ;
rule init ( )
{
type.register PATH_LIST : pathlist ;
class dll-paths-list-generator : generator
{
rule __init__ ( )
{
generator.__init__ dll_paths.list : EXE : PATH_LIST ;
}
rule generated-targets ( sources + : property-set : project name ? )
{
local dll-paths ;
for local s in $(sources)
{
local a = [ $(s).action ] ;
if $(a)
{
local p = [ $(a).properties ] ;
dll-paths += [ $(p).get <dll-path> ] ;
}
}
return [ generator.generated-targets $(sources) :
[ $(property-set).add-raw $(dll-paths:G=<dll-path>) ] :
$(project) $(name) ] ;
}
}
generators.register [ new dll-paths-list-generator ] ;
}
rule list ( target : sources * : properties * )
{
local paths = [ feature.get-values <dll-path> : $(properties) ] ;
paths = [ sequence.insertion-sort $(paths) ] ;
print.output $(target) ;
print.text $(paths) ;
}
""")
t.write("dll_paths.py", """\
import bjam
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
def init():
type.register("PATH_LIST", ["pathlist"])
class DllPathsListGenerator(generators.Generator):
def __init__(self):
generators.Generator.__init__(self, "dll_paths.list", False,
["EXE"], ["PATH_LIST"])
def generated_targets(self, sources, ps, project, name):
dll_paths = []
for s in sources:
a = s.action()
if a:
p = a.properties()
dll_paths += p.get('dll-path')
dll_paths.sort()
return generators.Generator.generated_targets(self, sources,
ps.add_raw(["<dll-path>" + p for p in dll_paths]), project,
name)
generators.register(DllPathsListGenerator())
command = \"\"\"
echo $(PATHS) > $(<[1])
\"\"\"
def function(target, sources, ps):
bjam.call('set-target-variable', target, "PATHS", ps.get('dll-path'))
get_manager().engine().register_action("dll_paths.list", command,
function=function)
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() {}
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.write("b/b.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
bar() {}
""")
t.run_build_system(["hardcode-dll-paths=true"])
t.expect_addition("bin/$toolset/debug*/mp.pathlist")
es1 = t.adjust_name("a/bin/$toolset/debug*")
es2 = t.adjust_name("b/bin/$toolset/debug*")
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es1)
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es2)
t.rm("bin/$toolset/debug*/mp.pathlist")
# Now run the same checks with pre-built libraries
adll = t.glob_file("a/bin/$toolset/debug*/a.dll")
bdll = t.glob_file("b/bin/$toolset/debug*/b.dll")
t.write("b/jamfile.jam", """
local bdll = %s ;
# Make sure that it is found even with multiple source-locations
project : source-location c $(bdll:D) ;
lib b : ../a//a : <file>$(bdll:D=) ;
""" % bdll.replace("\\", "\\\\"))
t.run_build_system(["hardcode-dll-paths=true"])
t.expect_addition("bin/$toolset/debug*/mp.pathlist")
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es1)
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es2)
t.cleanup()
| 28.176829 | 77 | 0.61069 |
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", "using dll_paths ;")
t.write("jamfile.jam", """\
exe main : main.cpp b//b ;
explicit main ;
path-list mp : main ;
""")
t.write("main.cpp", "int main() {}\n")
t.write("dll_paths.jam", """\
import "class" : new ;
import feature ;
import generators ;
import print ;
import sequence ;
import type ;
rule init ( )
{
type.register PATH_LIST : pathlist ;
class dll-paths-list-generator : generator
{
rule __init__ ( )
{
generator.__init__ dll_paths.list : EXE : PATH_LIST ;
}
rule generated-targets ( sources + : property-set : project name ? )
{
local dll-paths ;
for local s in $(sources)
{
local a = [ $(s).action ] ;
if $(a)
{
local p = [ $(a).properties ] ;
dll-paths += [ $(p).get <dll-path> ] ;
}
}
return [ generator.generated-targets $(sources) :
[ $(property-set).add-raw $(dll-paths:G=<dll-path>) ] :
$(project) $(name) ] ;
}
}
generators.register [ new dll-paths-list-generator ] ;
}
rule list ( target : sources * : properties * )
{
local paths = [ feature.get-values <dll-path> : $(properties) ] ;
paths = [ sequence.insertion-sort $(paths) ] ;
print.output $(target) ;
print.text $(paths) ;
}
""")
t.write("dll_paths.py", """\
import bjam
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
def init():
type.register("PATH_LIST", ["pathlist"])
class DllPathsListGenerator(generators.Generator):
def __init__(self):
generators.Generator.__init__(self, "dll_paths.list", False,
["EXE"], ["PATH_LIST"])
def generated_targets(self, sources, ps, project, name):
dll_paths = []
for s in sources:
a = s.action()
if a:
p = a.properties()
dll_paths += p.get('dll-path')
dll_paths.sort()
return generators.Generator.generated_targets(self, sources,
ps.add_raw(["<dll-path>" + p for p in dll_paths]), project,
name)
generators.register(DllPathsListGenerator())
command = \"\"\"
echo $(PATHS) > $(<[1])
\"\"\"
def function(target, sources, ps):
bjam.call('set-target-variable', target, "PATHS", ps.get('dll-path'))
get_manager().engine().register_action("dll_paths.list", command,
function=function)
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() {}
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.write("b/b.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
bar() {}
""")
t.run_build_system(["hardcode-dll-paths=true"])
t.expect_addition("bin/$toolset/debug*/mp.pathlist")
es1 = t.adjust_name("a/bin/$toolset/debug*")
es2 = t.adjust_name("b/bin/$toolset/debug*")
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es1)
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es2)
t.rm("bin/$toolset/debug*/mp.pathlist")
adll = t.glob_file("a/bin/$toolset/debug*/a.dll")
bdll = t.glob_file("b/bin/$toolset/debug*/b.dll")
t.write("b/jamfile.jam", """
local bdll = %s ;
# Make sure that it is found even with multiple source-locations
project : source-location c $(bdll:D) ;
lib b : ../a//a : <file>$(bdll:D=) ;
""" % bdll.replace("\\", "\\\\"))
t.run_build_system(["hardcode-dll-paths=true"])
t.expect_addition("bin/$toolset/debug*/mp.pathlist")
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es1)
t.expect_content_lines("bin/$toolset/debug*/mp.pathlist", "*" + es2)
t.cleanup()
| true | true |
f7331d89ea2c95a3ddeecfb941f15be9e6fe77fe | 148 | py | Python | opensfm/log.py | YonatanSimson/OpenSfM | 358843738359f4b5d767b22df2f3960ded31c981 | [
"BSD-2-Clause"
] | 1 | 2020-11-18T03:14:45.000Z | 2020-11-18T03:14:45.000Z | opensfm/log.py | YonatanSimson/OpenSfM | 358843738359f4b5d767b22df2f3960ded31c981 | [
"BSD-2-Clause"
] | null | null | null | opensfm/log.py | YonatanSimson/OpenSfM | 358843738359f4b5d767b22df2f3960ded31c981 | [
"BSD-2-Clause"
] | null | null | null | import logging
import os
def setup():
logging.basicConfig(
format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG
)
| 16.444444 | 76 | 0.648649 | import logging
import os
def setup():
logging.basicConfig(
format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG
)
| true | true |
f7331f00e53db23e56e03175308569c9a9f459e0 | 817 | py | Python | scraper/services/WebScraper.py | grimmhud/django-scraper | 66902fa73cc6fcb3504bad97c19e95e2951e9754 | [
"MIT"
] | null | null | null | scraper/services/WebScraper.py | grimmhud/django-scraper | 66902fa73cc6fcb3504bad97c19e95e2951e9754 | [
"MIT"
] | null | null | null | scraper/services/WebScraper.py | grimmhud/django-scraper | 66902fa73cc6fcb3504bad97c19e95e2951e9754 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import ast
def scrap_website(url, filter):
soup = __get_html_content_as_soup(url)
return __extract_data(soup, filter)
def __get_html_content_as_soup(url):
response = requests.get(url)
return BeautifulSoup(response.text, 'lxml')
def __extract_data(soup, filter):
selected_content = soup.select(filter)
data = []
for content in selected_content:
data.append(content.text.strip())
return __clean_data(data)
def __clean_data(data):
data_str = str(data)
if '\\n' in data_str or '\\r' in data_str:
data_str = data_str.replace('\\r','').replace('\\n','')
if '\n' in data_str or '\r' in data_str:
data_str = data_str.replace('\r','').replace('\n','')
return ast.literal_eval(data_str) | 26.354839 | 63 | 0.665851 | from bs4 import BeautifulSoup
import requests
import ast
def scrap_website(url, filter):
soup = __get_html_content_as_soup(url)
return __extract_data(soup, filter)
def __get_html_content_as_soup(url):
response = requests.get(url)
return BeautifulSoup(response.text, 'lxml')
def __extract_data(soup, filter):
selected_content = soup.select(filter)
data = []
for content in selected_content:
data.append(content.text.strip())
return __clean_data(data)
def __clean_data(data):
data_str = str(data)
if '\\n' in data_str or '\\r' in data_str:
data_str = data_str.replace('\\r','').replace('\\n','')
if '\n' in data_str or '\r' in data_str:
data_str = data_str.replace('\r','').replace('\n','')
return ast.literal_eval(data_str) | true | true |
f7331f8ce198dccf836db66c2e0a80f3f5329d05 | 1,053 | py | Python | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | 1 | 2020-05-01T21:29:17.000Z | 2020-05-01T21:29:17.000Z | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | null | null | null | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | 1 | 2020-06-12T23:32:14.000Z | 2020-06-12T23:32:14.000Z | '''
48. Rotate Image Medium
You are given an n x n 2D matrix representing an image, rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
'''
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
self.transpose(matrix)
self.reflex(matrix)
def transpose(self, matrix):
# Since matrix size nxn
m_len = len(matrix)
for r in range(m_len):
for c in range(r, m_len):
matrix[c][r], matrix[r][c] = matrix[r][c], matrix[c][r]
# Reflex matrix by middle vertical axis
def reflex(self, matrix):
for r in range(len(matrix)):
for c in range(len(matrix)//2):
matrix[r][c], matrix[r][len(matrix)-1-c] = matrix[r][len(matrix)-1-c], matrix[r][c]
| 35.1 | 154 | 0.578348 | class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
self.transpose(matrix)
self.reflex(matrix)
def transpose(self, matrix):
m_len = len(matrix)
for r in range(m_len):
for c in range(r, m_len):
matrix[c][r], matrix[r][c] = matrix[r][c], matrix[c][r]
def reflex(self, matrix):
for r in range(len(matrix)):
for c in range(len(matrix)//2):
matrix[r][c], matrix[r][len(matrix)-1-c] = matrix[r][len(matrix)-1-c], matrix[r][c]
| true | true |
f7331fbd624a7306d9d51e241d7a6f3f2d83e072 | 803 | py | Python | reframe/__init__.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | null | null | null | reframe/__init__.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | null | null | null | reframe/__init__.py | jacwah/reframe | d650bbbb2f87c6ae5f354e50b50bcfd98fafe77b | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
VERSION = '3.11.0-dev.2'
INSTALL_PREFIX = os.path.normpath(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
)
MIN_PYTHON_VERSION = (3, 6, 0)
# Check python version
if sys.version_info[:3] < MIN_PYTHON_VERSION:
sys.stderr.write('Unsupported Python version: '
'Python >= %d.%d.%d is required\n' % MIN_PYTHON_VERSION)
sys.exit(1)
os.environ['RFM_INSTALL_PREFIX'] = INSTALL_PREFIX
# Import important names for user tests
from reframe.core.pipeline import * # noqa: F401, F403
from reframe.core.decorators import * # noqa: F401, F403
| 29.740741 | 77 | 0.713574 |
import os
import sys
VERSION = '3.11.0-dev.2'
INSTALL_PREFIX = os.path.normpath(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
)
MIN_PYTHON_VERSION = (3, 6, 0)
if sys.version_info[:3] < MIN_PYTHON_VERSION:
sys.stderr.write('Unsupported Python version: '
'Python >= %d.%d.%d is required\n' % MIN_PYTHON_VERSION)
sys.exit(1)
os.environ['RFM_INSTALL_PREFIX'] = INSTALL_PREFIX
from reframe.core.pipeline import *
from reframe.core.decorators import *
| true | true |
f7332037c48c1a294de655fb80ab1613b7eb5f5e | 4,220 | py | Python | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 123 | 2018-06-09T00:49:39.000Z | 2022-03-09T05:41:20.000Z | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 9 | 2018-06-12T01:01:17.000Z | 2022-03-18T09:06:39.000Z | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 24 | 2018-06-09T00:42:46.000Z | 2021-09-29T08:23:32.000Z | import numpy as np
from program_synthesis.algolisp.tools import bleu
from program_synthesis.algolisp.dataset import executor
def is_same_code(example, res):
correct = False
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
correct = res.code_sequence == example.code_sequence
elif res.code_tree is not None:
correct = res.code_tree == example.code_tree
else:
correct = res == example.code_sequence
return correct
def compute_bleu(example, res):
try:
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
score = bleu.compute_bleu([example.code_sequence], [res.code_sequence])
else:
score = bleu.compute_bleu([example.code_sequence], [res])
return np.asscalar(score)
except ZeroDivisionError:
return 0.0
def get_stats_from_code(args):
res, example, executor_ = args
if len(example.tests) == 0:
return None
if executor_ is not None:
stats = executor.evaluate_code(
res.code_tree if res.code_tree else res.code_sequence, example.schema.args, example.tests,
executor_)
stats['exact-code-match'] = is_same_code(example, res)
stats['correct-program'] = int(stats['tests-executed'] == stats['tests-passed'])
else:
stats = {'tests-executed': 0, 'tests-passed': 0, 'result-none': 0, 'syntax-error': 0,
'runtime-exception': 0, 'exceptions': []}
stats['correct-program'] = stats['exact-code-match'] = is_same_code(example, res)
stats['bleu'] = compute_bleu(example, res)
stats['example'] = example.to_dict()
stats['res'] = res.to_dict() if hasattr(res, 'to_dict') else res
return stats
def run_inference(dataset, model, executor_):
"""Runs inference of given model on eval set, and executes resulting code.
Args:
dataset: Dataset, iterable of CodeExample to evaluate on.
model: Model that runs the inference.
executor: executor class from executor.py.
"""
for batch in dataset:
results = model.inference(batch)
for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):
if stats is not None:
yield stats
return
def compute_metrics(all_stats):
tests_num = 0
programs_num = 0
bleu_acc = 0.0
correct_program_acc = 0
# Almost correct programs are those that were executed on more than one test and passed at least 50% tests.
almost_correct_program_acc = 0
exact_code_match_acc = 0
syntax_error_acc = 0
runtime_exception_acc = 0
other_exception_acc = 0
for stats in all_stats:
tests_num += stats['tests-executed']
programs_num += 1
bleu_acc += stats['bleu']
correct_program_acc += stats['correct-program']
if (stats['correct-program'] != 0 or
stats['tests-executed'] > 1 and stats['tests-passed']/stats['tests-executed'] >= 0.5):
almost_correct_program_acc += 1
exact_code_match_acc += stats['exact-code-match']
syntax_error_acc += stats['syntax-error']
runtime_exception_acc += stats['runtime-exception']
other_exception_acc += len(stats['exceptions'])
return {'bleu': (bleu_acc/programs_num) if programs_num else 0.0,
'accuracy': (correct_program_acc/programs_num) if programs_num else 0.0,
'50p_accuracy': (almost_correct_program_acc/programs_num) if programs_num else 0.0,
'exact_match_accuracy': (exact_code_match_acc/programs_num) if programs_num else 0.0,
'syntax_error_freq': (syntax_error_acc/tests_num) if tests_num else 0.0,
'runtime_exception_freq': (runtime_exception_acc/tests_num) if tests_num else 0.0,
'other_exception_freq': (other_exception_acc/tests_num) if tests_num else 0.0,
'programs_num': programs_num,
'tests_num': tests_num,
'correct_program_num': correct_program_acc,
'almost_correct_program_num': almost_correct_program_acc,
'exact_code_match_num': exact_code_match_acc,
}
| 40.576923 | 111 | 0.657346 | import numpy as np
from program_synthesis.algolisp.tools import bleu
from program_synthesis.algolisp.dataset import executor
def is_same_code(example, res):
correct = False
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
correct = res.code_sequence == example.code_sequence
elif res.code_tree is not None:
correct = res.code_tree == example.code_tree
else:
correct = res == example.code_sequence
return correct
def compute_bleu(example, res):
try:
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
score = bleu.compute_bleu([example.code_sequence], [res.code_sequence])
else:
score = bleu.compute_bleu([example.code_sequence], [res])
return np.asscalar(score)
except ZeroDivisionError:
return 0.0
def get_stats_from_code(args):
res, example, executor_ = args
if len(example.tests) == 0:
return None
if executor_ is not None:
stats = executor.evaluate_code(
res.code_tree if res.code_tree else res.code_sequence, example.schema.args, example.tests,
executor_)
stats['exact-code-match'] = is_same_code(example, res)
stats['correct-program'] = int(stats['tests-executed'] == stats['tests-passed'])
else:
stats = {'tests-executed': 0, 'tests-passed': 0, 'result-none': 0, 'syntax-error': 0,
'runtime-exception': 0, 'exceptions': []}
stats['correct-program'] = stats['exact-code-match'] = is_same_code(example, res)
stats['bleu'] = compute_bleu(example, res)
stats['example'] = example.to_dict()
stats['res'] = res.to_dict() if hasattr(res, 'to_dict') else res
return stats
def run_inference(dataset, model, executor_):
for batch in dataset:
results = model.inference(batch)
for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):
if stats is not None:
yield stats
return
def compute_metrics(all_stats):
tests_num = 0
programs_num = 0
bleu_acc = 0.0
correct_program_acc = 0
almost_correct_program_acc = 0
exact_code_match_acc = 0
syntax_error_acc = 0
runtime_exception_acc = 0
other_exception_acc = 0
for stats in all_stats:
tests_num += stats['tests-executed']
programs_num += 1
bleu_acc += stats['bleu']
correct_program_acc += stats['correct-program']
if (stats['correct-program'] != 0 or
stats['tests-executed'] > 1 and stats['tests-passed']/stats['tests-executed'] >= 0.5):
almost_correct_program_acc += 1
exact_code_match_acc += stats['exact-code-match']
syntax_error_acc += stats['syntax-error']
runtime_exception_acc += stats['runtime-exception']
other_exception_acc += len(stats['exceptions'])
return {'bleu': (bleu_acc/programs_num) if programs_num else 0.0,
'accuracy': (correct_program_acc/programs_num) if programs_num else 0.0,
'50p_accuracy': (almost_correct_program_acc/programs_num) if programs_num else 0.0,
'exact_match_accuracy': (exact_code_match_acc/programs_num) if programs_num else 0.0,
'syntax_error_freq': (syntax_error_acc/tests_num) if tests_num else 0.0,
'runtime_exception_freq': (runtime_exception_acc/tests_num) if tests_num else 0.0,
'other_exception_freq': (other_exception_acc/tests_num) if tests_num else 0.0,
'programs_num': programs_num,
'tests_num': tests_num,
'correct_program_num': correct_program_acc,
'almost_correct_program_num': almost_correct_program_acc,
'exact_code_match_num': exact_code_match_acc,
}
| true | true |
f7332097fae90562e0b8ddcc60167c43486e039a | 793 | py | Python | migrations/versions/f1578ff17ae1_new_fields_in_user_moodel.py | aboladebaba/flaskTut | 9da02e2d8a1c78ffdd84c11796bedb8e08913c85 | [
"MIT"
] | null | null | null | migrations/versions/f1578ff17ae1_new_fields_in_user_moodel.py | aboladebaba/flaskTut | 9da02e2d8a1c78ffdd84c11796bedb8e08913c85 | [
"MIT"
] | null | null | null | migrations/versions/f1578ff17ae1_new_fields_in_user_moodel.py | aboladebaba/flaskTut | 9da02e2d8a1c78ffdd84c11796bedb8e08913c85 | [
"MIT"
] | null | null | null | """new fields in user moodel
Revision ID: f1578ff17ae1
Revises: bda639e5aafd
Create Date: 2021-01-11 10:01:54.417977
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1578ff17ae1'
down_revision = 'bda639e5aafd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
| 25.580645 | 86 | 0.691047 | from alembic import op
import sqlalchemy as sa
revision = 'f1578ff17ae1'
down_revision = 'bda639e5aafd'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f73320b0766ec5ad6b88eb37a6a3cde6d5816d5c | 4,066 | py | Python | s2_organisation_and_version_control/exercise_files/typing_exercise_solution.py | oliverkinch/dtu_mlops | ce3a1f8f02ee95105b7b907735c39ad082321a4b | [
"Apache-2.0"
] | 94 | 2021-06-01T09:53:45.000Z | 2022-03-29T21:06:22.000Z | s2_organisation_and_version_control/exercise_files/typing_exercise_solution.py | oliverkinch/dtu_mlops | ce3a1f8f02ee95105b7b907735c39ad082321a4b | [
"Apache-2.0"
] | 4 | 2021-06-07T08:28:40.000Z | 2022-01-07T19:56:40.000Z | s2_organisation_and_version_control/exercise_files/typing_exercise_solution.py | oliverkinch/dtu_mlops | ce3a1f8f02ee95105b7b907735c39ad082321a4b | [
"Apache-2.0"
] | 133 | 2021-06-05T07:20:37.000Z | 2022-03-22T10:56:51.000Z | from typing import Callable, Tuple, Union, Optional, List
import torch
import torch.nn.functional as F
from torch import nn
class Network(nn.Module):
def __init__(self, input_size: int, output_size: int, hidden_layers: List[int], drop_p: float = 0.5) -> None:
''' Builds a feedforward network with arbitrary hidden layers.
Arguments
---------
input_size: integer, size of the input layer
output_size: integer, size of the output layer
hidden_layers: list of integers, the sizes of the hidden layers
'''
super().__init__()
# Input to a hidden layer
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
# Add a variable number of more hidden layers
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=drop_p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
''' Forward pass through the network, returns the output logits '''
for each in self.hidden_layers:
x = F.relu(each(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim=1)
def validation(
model: nn.Module,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module]
) -> Tuple[float, float]:
accuracy = 0
test_loss = 0
for images, labels in testloader:
images = images.resize_(images.size()[0], 784)
output = model.forward(images)
test_loss += criterion(output, labels).item()
## Calculating the accuracy
# Model's output is log-softmax, take exponential to get the probabilities
ps = torch.exp(output)
# Class with highest probability is our predicted class, compare with true label
equality = (labels.data == ps.max(1)[1])
# Accuracy is number of correct predictions divided by all predictions, just take the mean
accuracy += equality.type_as(torch.FloatTensor()).mean().item()
return test_loss, accuracy
def train(
model: nn.Module,
trainloader: torch.utils.data.DataLoader,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module],
optimizer: Optional[torch.optim.Optimizer] = None,
epochs: int = 5,
print_every: int = 40,
) -> None:
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
steps = 0
running_loss = 0
for e in range(epochs):
# Model in training mode, dropout is on
model.train()
for images, labels in trainloader:
steps += 1
# Flatten images into a 784 long vector
images.resize_(images.size()[0], 784)
optimizer.zero_grad()
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Model in inference mode, dropout is off
model.eval()
# Turn off gradients for validation, will speed up inference
with torch.no_grad():
test_loss, accuracy = validation(model, testloader, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
running_loss = 0
# Make sure dropout and grads are on for training
model.train()
| 35.666667 | 113 | 0.57575 | from typing import Callable, Tuple, Union, Optional, List
import torch
import torch.nn.functional as F
from torch import nn
class Network(nn.Module):
def __init__(self, input_size: int, output_size: int, hidden_layers: List[int], drop_p: float = 0.5) -> None:
super().__init__()
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=drop_p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for each in self.hidden_layers:
x = F.relu(each(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim=1)
def validation(
model: nn.Module,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module]
) -> Tuple[float, float]:
accuracy = 0
test_loss = 0
for images, labels in testloader:
images = images.resize_(images.size()[0], 784)
output = model.forward(images)
test_loss += criterion(output, labels).item()
h.exp(output)
# Class with highest probability is our predicted class, compare with true label
equality = (labels.data == ps.max(1)[1])
# Accuracy is number of correct predictions divided by all predictions, just take the mean
accuracy += equality.type_as(torch.FloatTensor()).mean().item()
return test_loss, accuracy
def train(
model: nn.Module,
trainloader: torch.utils.data.DataLoader,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module],
optimizer: Optional[torch.optim.Optimizer] = None,
epochs: int = 5,
print_every: int = 40,
) -> None:
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
steps = 0
running_loss = 0
for e in range(epochs):
# Model in training mode, dropout is on
model.train()
for images, labels in trainloader:
steps += 1
# Flatten images into a 784 long vector
images.resize_(images.size()[0], 784)
optimizer.zero_grad()
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Model in inference mode, dropout is off
model.eval()
# Turn off gradients for validation, will speed up inference
with torch.no_grad():
test_loss, accuracy = validation(model, testloader, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
running_loss = 0
# Make sure dropout and grads are on for training
model.train()
| true | true |
f733214d74349e47d083be5a9c8839cd0b56b8fc | 6,801 | py | Python | python_modules/dagster/dagster/core/execution/execute_in_process_result.py | makotonium/dagster | f5d56514b7e7c5bca28ea14060316d242f51b71b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/execute_in_process_result.py | makotonium/dagster | f5d56514b7e7c5bca28ea14060316d242f51b71b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/execute_in_process_result.py | makotonium/dagster | f5d56514b7e7c5bca28ea14060316d242f51b71b | [
"Apache-2.0"
] | 1 | 2021-12-08T18:13:19.000Z | 2021-12-08T18:13:19.000Z | from typing import Any, Dict, List, Optional, cast
from dagster import DagsterEvent, check
from dagster.core.definitions import NodeDefinition, NodeHandle
from dagster.core.definitions.utils import DEFAULT_OUTPUT
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.execution.plan.outputs import StepOutputHandle
class ExecuteInProcessResult:
    """Result of an in-process execution of a job/graph.

    Wraps the run's event stream and the captured step outputs so callers can
    query success, per-node events, and output values after the run completes.
    """

    def __init__(
        self,
        node_def: NodeDefinition,
        all_events: List[DagsterEvent],
        run_id: str,
        output_capture: Optional[Dict[StepOutputHandle, Any]],
    ):
        # node_def: the top-level node (graph) that was executed.
        # all_events: every DagsterEvent emitted during the run.
        # output_capture: mapping from step output handles to produced values.
        self._node_def = node_def

        # If top-level result, no handle will be provided
        self._handle = NodeHandle(node_def.name, parent=None)
        self._event_list = all_events
        self._run_id = run_id

        self._output_capture = check.opt_dict_param(
            output_capture, "output_capture", key_type=StepOutputHandle
        )

    @property
    def success(self) -> bool:
        """bool: Whether execution was successful."""
        # Successful only if no event in the stream is a failure event.
        return all([not event.is_failure for event in self._event_list])

    @property
    def all_node_events(self) -> List[DagsterEvent]:
        """List[DagsterEvent]: All dagster events from the in-process execution."""
        step_events = []

        # Collect step events for every top-level node of the executed graph
        # (descendant events are included by the handle filter).
        for node_name in self._node_def.ensure_graph_def().node_dict.keys():
            handle = NodeHandle(node_name, None)
            step_events += _filter_events_by_handle(self._event_list, handle)

        return step_events

    @property
    def run_id(self) -> str:
        """str: The run id for the executed run"""
        return self._run_id

    def events_for_node(self, node_name: str) -> List[DagsterEvent]:
        """Retrieves all dagster events for a specific node.

        Args:
            node_name (str): The name of the node for which events should be retrieved.

        Returns:
            List[DagsterEvent]: A list of all dagster events associated with provided node name.
        """
        check.str_param(node_name, "node_name")

        return _filter_events_by_handle(self._event_list, NodeHandle.from_string(node_name))

    def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:
        """Retrieves output of top-level job, if an output is returned.

        If the top-level job has no output, calling this method will result in a
        DagsterInvariantViolationError.

        Args:
            output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,
                the default output name in dagster.

        Returns:
            Any: The value of the retrieved output.
        """
        check.str_param(output_name, "output_name")

        graph_def = self._node_def.ensure_graph_def()
        if not graph_def.has_output(output_name) and len(graph_def.output_mappings) == 0:
            raise DagsterInvariantViolationError(
                f"Attempted to retrieve top-level outputs for '{graph_def.name}', which has no outputs."
            )
        elif not graph_def.has_output(output_name):
            raise DagsterInvariantViolationError(
                f"Could not find top-level output '{output_name}' in '{graph_def.name}'."
            )
        # Resolve the first layer of mapping
        output_mapping = graph_def.get_output_mapping(output_name)
        mapped_node = graph_def.solid_named(output_mapping.maps_from.solid_name)
        origin_output_def, origin_handle = mapped_node.definition.resolve_output_to_origin(
            output_mapping.maps_from.output_name,
            NodeHandle(mapped_node.name, None),
        )

        # Get output from origin node
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )

    def output_for_node(self, node_str: str, output_name: Optional[str] = DEFAULT_OUTPUT) -> Any:
        """Retrieves output value with a particular name from the in-process run of the job.

        Args:
            node_str (str): Name of the op/graph whose output should be retrieved. If the intended
                graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.
            output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to
                `result`, the default output name in dagster.

        Returns:
            Any: The value of the retrieved output.
        """

        # resolve handle of node that node_str is referring to
        target_handle = NodeHandle.from_string(node_str)
        target_node_def = self._node_def.ensure_graph_def().get_solid(target_handle).definition
        origin_output_def, origin_handle = target_node_def.resolve_output_to_origin(
            output_name, NodeHandle.from_string(node_str)
        )

        # retrieve output value from resolved handle
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )
def _filter_events_by_handle(
event_list: List[DagsterEvent], handle: NodeHandle
) -> List[DagsterEvent]:
step_events = []
for event in event_list:
if event.is_step_event:
event_handle = cast(
NodeHandle, event.solid_handle
) # step events are guaranteed to have a node handle.
if event_handle.is_or_descends_from(handle):
step_events.append(event)
return step_events
def _filter_outputs_by_handle(
output_dict: Dict[StepOutputHandle, Any],
node_handle: NodeHandle,
output_name: str,
) -> Any:
mapped_outputs = {}
step_key = str(node_handle)
output_found = False
for step_output_handle, value in output_dict.items():
# For the mapped output case, where step keys are in the format
# "step_key[upstream_mapped_output_name]" within the step output handle.
if step_output_handle.step_key.startswith(f"{step_key}["):
output_found = True
key_start = step_output_handle.step_key.find("[")
key_end = step_output_handle.step_key.find("]")
upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]
mapped_outputs[upstream_mapped_output_name] = value
# For all other cases, search for exact match.
elif (
step_key == step_output_handle.step_key
and step_output_handle.output_name == output_name
):
output_found = True
if not step_output_handle.mapping_key:
return output_dict[step_output_handle]
mapped_outputs[step_output_handle.mapping_key] = value
if not output_found:
raise DagsterInvariantViolationError(f"No outputs found for node '{node_handle}'.")
return mapped_outputs
| 38.862857 | 104 | 0.670931 | from typing import Any, Dict, List, Optional, cast
from dagster import DagsterEvent, check
from dagster.core.definitions import NodeDefinition, NodeHandle
from dagster.core.definitions.utils import DEFAULT_OUTPUT
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.execution.plan.outputs import StepOutputHandle
class ExecuteInProcessResult:
    """Result of an in-process execution of a job/graph.

    Wraps the run's event stream and the captured step outputs so callers can
    query success, per-node events, and output values after the run completes.
    """

    def __init__(
        self,
        node_def: NodeDefinition,
        all_events: List[DagsterEvent],
        run_id: str,
        output_capture: Optional[Dict[StepOutputHandle, Any]],
    ):
        # node_def: the top-level node (graph) that was executed.
        # all_events: every DagsterEvent emitted during the run.
        # output_capture: mapping from step output handles to produced values.
        self._node_def = node_def
        # Top-level result: no parent handle exists.
        self._handle = NodeHandle(node_def.name, parent=None)
        self._event_list = all_events
        self._run_id = run_id
        self._output_capture = check.opt_dict_param(
            output_capture, "output_capture", key_type=StepOutputHandle
        )

    @property
    def success(self) -> bool:
        """bool: Whether execution was successful (no failure events)."""
        return all([not event.is_failure for event in self._event_list])

    @property
    def all_node_events(self) -> List[DagsterEvent]:
        """List[DagsterEvent]: All dagster events from the in-process execution."""
        step_events = []
        # Collect step events for every top-level node of the executed graph.
        for node_name in self._node_def.ensure_graph_def().node_dict.keys():
            handle = NodeHandle(node_name, None)
            step_events += _filter_events_by_handle(self._event_list, handle)
        return step_events

    @property
    def run_id(self) -> str:
        """str: The run id for the executed run."""
        return self._run_id

    def events_for_node(self, node_name: str) -> List[DagsterEvent]:
        """Retrieve all dagster events for the node named ``node_name``."""
        check.str_param(node_name, "node_name")
        return _filter_events_by_handle(self._event_list, NodeHandle.from_string(node_name))

    def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:
        """Retrieve a top-level output of the executed job.

        Raises DagsterInvariantViolationError when the job has no outputs or
        has no output named ``output_name``.
        """
        check.str_param(output_name, "output_name")
        graph_def = self._node_def.ensure_graph_def()
        if not graph_def.has_output(output_name) and len(graph_def.output_mappings) == 0:
            raise DagsterInvariantViolationError(
                f"Attempted to retrieve top-level outputs for '{graph_def.name}', which has no outputs."
            )
        elif not graph_def.has_output(output_name):
            raise DagsterInvariantViolationError(
                f"Could not find top-level output '{output_name}' in '{graph_def.name}'."
            )
        # Resolve the first layer of output mapping down to the origin node.
        output_mapping = graph_def.get_output_mapping(output_name)
        mapped_node = graph_def.solid_named(output_mapping.maps_from.solid_name)
        origin_output_def, origin_handle = mapped_node.definition.resolve_output_to_origin(
            output_mapping.maps_from.output_name,
            NodeHandle(mapped_node.name, None),
        )
        # Fetch the captured value from the origin node's output.
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )

    def output_for_node(self, node_str: str, output_name: Optional[str] = DEFAULT_OUTPUT) -> Any:
        """Retrieve the output named ``output_name`` of the op/graph ``node_str``.

        Nested nodes use dotted syntax, e.g. ``outer_graph.inner_node``.
        """
        # Resolve the handle the node_str refers to, then its origin output.
        target_handle = NodeHandle.from_string(node_str)
        target_node_def = self._node_def.ensure_graph_def().get_solid(target_handle).definition
        origin_output_def, origin_handle = target_node_def.resolve_output_to_origin(
            output_name, NodeHandle.from_string(node_str)
        )
        # Fetch the captured value for the resolved handle.
        return _filter_outputs_by_handle(
            self._output_capture, origin_handle, origin_output_def.name
        )
def _filter_events_by_handle(
event_list: List[DagsterEvent], handle: NodeHandle
) -> List[DagsterEvent]:
step_events = []
for event in event_list:
if event.is_step_event:
event_handle = cast(
NodeHandle, event.solid_handle
)
if event_handle.is_or_descends_from(handle):
step_events.append(event)
return step_events
def _filter_outputs_by_handle(
output_dict: Dict[StepOutputHandle, Any],
node_handle: NodeHandle,
output_name: str,
) -> Any:
mapped_outputs = {}
step_key = str(node_handle)
output_found = False
for step_output_handle, value in output_dict.items():
if step_output_handle.step_key.startswith(f"{step_key}["):
output_found = True
key_start = step_output_handle.step_key.find("[")
key_end = step_output_handle.step_key.find("]")
upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]
mapped_outputs[upstream_mapped_output_name] = value
elif (
step_key == step_output_handle.step_key
and step_output_handle.output_name == output_name
):
output_found = True
if not step_output_handle.mapping_key:
return output_dict[step_output_handle]
mapped_outputs[step_output_handle.mapping_key] = value
if not output_found:
raise DagsterInvariantViolationError(f"No outputs found for node '{node_handle}'.")
return mapped_outputs
| true | true |
f73322ecb27294a01b81c53f96a5bf1229d121ca | 221 | py | Python | server/apps/user/admin.py | arun-thekkuden/django-app-structure | fa55696bcd175b11c9dacd8084241393f6ffb3f0 | [
"MIT"
] | null | null | null | server/apps/user/admin.py | arun-thekkuden/django-app-structure | fa55696bcd175b11c9dacd8084241393f6ffb3f0 | [
"MIT"
] | null | null | null | server/apps/user/admin.py | arun-thekkuden/django-app-structure | fa55696bcd175b11c9dacd8084241393f6ffb3f0 | [
"MIT"
] | 1 | 2021-02-28T09:48:05.000Z | 2021-02-28T09:48:05.000Z | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the project's custom ``User`` model."""

    # Show the email field in the admin form but keep it non-editable.
    readonly_fields = ('email',)
# Expose the custom User model in the Django admin via the customised admin class.
admin.site.register(User, CustomUserAdmin)
| 20.090909 | 47 | 0.78733 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the project's custom ``User`` model."""

    # Show the email field in the admin form but keep it non-editable.
    readonly_fields = ('email',)
# Expose the custom User model in the Django admin via the customised admin class.
admin.site.register(User, CustomUserAdmin)
| true | true |
f73323917401f52a7716451a042bd30c18ecda5f | 4,250 | py | Python | src/list-availability-zones.py | AWSToolbox/list-availability-zones | 84c4c3ccaa76b1c050d13891247a200bbe36d4e3 | [
"MIT"
] | 1 | 2021-10-04T09:04:19.000Z | 2021-10-04T09:04:19.000Z | src/list-availability-zones.py | AWSToolbox/list-availability-zones | 84c4c3ccaa76b1c050d13891247a200bbe36d4e3 | [
"MIT"
] | null | null | null | src/list-availability-zones.py | AWSToolbox/list-availability-zones | 84c4c3ccaa76b1c050d13891247a200bbe36d4e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
This script will query the AWS API using boto3 and provide a list (table) of all regions and availability zones.
Example Usage:
./list-availability-zones.py
"""
from __future__ import print_function
import boto3
import requests
import sys
from botocore.exceptions import ClientError
from prettytable import PrettyTable
# Placeholder shown when a region code is missing from country_mapping.
unknown_string = 'unknown'

# Human-readable location for each AWS region code. Regions returned by the
# API but absent from this table fall back to unknown_string.
country_mapping = {
    'af-south-1': 'Africa (Cape Town)',
    'ap-east-1': 'Asia Pacific (Hong Kong)',
    'ap-south-1': 'Asia Pacific (Mumbai)',
    'ap-northeast-1': 'Asia Pacific (Tokyo)',
    'ap-northeast-2': 'Asia Pacific (Seoul)',
    'ap-northeast-3': 'Asia Pacific (Osaka)',
    'ap-southeast-1': 'Asia Pacific (Singapore)',
    'ap-southeast-2': 'Asia Pacific (Sydney)',
    'ca-central-1': 'Canada (Central)',
    'eu-central-1': 'Europe (Frankfurt)',
    'eu-west-1': 'Europe (Ireland)',
    'eu-west-2': 'Europe (London)',
    'eu-west-3': 'Europe (Paris)',
    'eu-north-1': 'Europe (Stockholm)',
    'eu-south-1': 'Europe (Milan)',
    'me-south-1': 'Middle East (Bahrain)',
    'sa-east-1': 'South America (Sao Paulo)',
    'us-east-1': 'US East (North Virginia)',
    'us-east-2': 'US East (Ohio)',
    'us-west-1': 'US West (California) ',
    'us-west-2': 'US West (Oregon)',
}
def main(cmdline=None):
    """
    Entry point: query AWS for all regions/availability zones and print them as a table.
    """
    ec2_client = boto3.client('ec2')
    display_results(query_api(ec2_client))
def query_api(client):
    """
    Query the EC2 API for every region and its availability zones.

    Args:
        client: A boto3 EC2 client used for the initial describe_regions call.

    Returns:
        A list of dicts, one per region, with the region name, human-readable
        location, comma-separated AZ suffixes and AZ count. Empty when the
        describe_regions call fails.
    """
    results = []

    try:
        response = client.describe_regions(AllRegions=True)
    except ClientError as e:
        print("Error: " + str(e))
    else:
        for region in response.get('Regions', []):
            azs = []
            my_region_name = region['RegionName']
            ec2_region = boto3.client('ec2', region_name=my_region_name)
            my_region = [{'Name': 'region-name', 'Values': [my_region_name]}]
            try:
                aws_azs = ec2_region.describe_availability_zones(Filters=my_region)
            except ClientError:
                # Region not enabled / not reachable for this account:
                # report empty AZ information for it.
                az_list = ''
                az_count = ''
            else:
                for az in aws_azs['AvailabilityZones']:
                    # Keep only the zone suffix (e.g. 'a' from 'eu-west-1a').
                    zone = az['ZoneName'].replace(my_region_name, '')
                    azs.append(zone)
                az_list = ', '.join(azs)
                az_count = len(azs)

            results.append({
                'RegionName': my_region_name,
                'Location': country_mapping.get(my_region_name, unknown_string),
                'AZS': az_list,
                'AZ_COUNT': az_count,
            })

    return results
def display_results(results):
    """
    Render the region/AZ records as an ASCII table sorted by region name.
    """
    table = PrettyTable()
    table.field_names = [
        'Region Name',
        'Location',
        'Availability Zones',
        'Count',
    ]

    for record in results:
        row = [
            record['RegionName'],
            record['Location'],
            record['AZS'],
            record['AZ_COUNT'],
        ]
        table.add_row(row)

    table.sortby = 'Region Name'
    print(table)
if __name__ == "__main__":
    # When run as a script, pass everything after the program name to main().
    # The value returned by main() becomes the process exit status via
    # sys.exit: 0 means success, non-zero signals an error to the shell.
    sys.exit(main(sys.argv[1:]))
| 32.442748 | 133 | 0.499059 |
from __future__ import print_function
import boto3
import requests
import sys
from botocore.exceptions import ClientError
from prettytable import PrettyTable
# Placeholder shown when a region code is missing from country_mapping.
unknown_string = 'unknown'

# Human-readable location for each AWS region code. Regions returned by the
# API but absent from this table fall back to unknown_string.
country_mapping = {
    'af-south-1': 'Africa (Cape Town)',
    'ap-east-1': 'Asia Pacific (Hong Kong)',
    'ap-south-1': 'Asia Pacific (Mumbai)',
    'ap-northeast-1': 'Asia Pacific (Tokyo)',
    'ap-northeast-2': 'Asia Pacific (Seoul)',
    'ap-northeast-3': 'Asia Pacific (Osaka)',
    'ap-southeast-1': 'Asia Pacific (Singapore)',
    'ap-southeast-2': 'Asia Pacific (Sydney)',
    'ca-central-1': 'Canada (Central)',
    'eu-central-1': 'Europe (Frankfurt)',
    'eu-west-1': 'Europe (Ireland)',
    'eu-west-2': 'Europe (London)',
    'eu-west-3': 'Europe (Paris)',
    'eu-north-1': 'Europe (Stockholm)',
    'eu-south-1': 'Europe (Milan)',
    'me-south-1': 'Middle East (Bahrain)',
    'sa-east-1': 'South America (Sao Paulo)',
    'us-east-1': 'US East (North Virginia)',
    'us-east-2': 'US East (Ohio)',
    'us-west-1': 'US West (California) ',
    'us-west-2': 'US West (Oregon)',
}
def main(cmdline=None):
    """
    Entry point: query AWS for all regions/availability zones and print them as a table.
    """
    ec2_client = boto3.client('ec2')
    display_results(query_api(ec2_client))
def query_api(client):
    """
    Query the EC2 API for every region and its availability zones.

    Args:
        client: A boto3 EC2 client used for the initial describe_regions call.

    Returns:
        A list of dicts, one per region, with the region name, human-readable
        location, comma-separated AZ suffixes and AZ count. Empty when the
        describe_regions call fails.
    """
    results = []

    try:
        response = client.describe_regions(AllRegions=True)
    except ClientError as e:
        print("Error: " + str(e))
    else:
        for region in response.get('Regions', []):
            azs = []
            my_region_name = region['RegionName']
            ec2_region = boto3.client('ec2', region_name=my_region_name)
            my_region = [{'Name': 'region-name', 'Values': [my_region_name]}]
            try:
                aws_azs = ec2_region.describe_availability_zones(Filters=my_region)
            except ClientError:
                # Region not enabled / not reachable for this account:
                # report empty AZ information for it.
                az_list = ''
                az_count = ''
            else:
                for az in aws_azs['AvailabilityZones']:
                    # Keep only the zone suffix (e.g. 'a' from 'eu-west-1a').
                    zone = az['ZoneName'].replace(my_region_name, '')
                    azs.append(zone)
                az_list = ', '.join(azs)
                az_count = len(azs)

            results.append({
                'RegionName': my_region_name,
                'Location': country_mapping.get(my_region_name, unknown_string),
                'AZS': az_list,
                'AZ_COUNT': az_count,
            })

    return results
def display_results(results):
    """
    Render the region/AZ records as an ASCII table sorted by region name.
    """
    table = PrettyTable()
    table.field_names = [
        'Region Name',
        'Location',
        'Availability Zones',
        'Count',
    ]

    for record in results:
        row = [
            record['RegionName'],
            record['Location'],
            record['AZS'],
            record['AZ_COUNT'],
        ]
        table.add_row(row)

    table.sortby = 'Region Name'
    print(table)
if __name__ == "__main__":
    # When run as a script, pass everything after the program name to main().
    # The value returned by main() becomes the process exit status via
    # sys.exit: 0 means success, non-zero signals an error to the shell.
    sys.exit(main(sys.argv[1:]))
| true | true |
f73323c5248a8c99f4a1341c763f28ceb0bd51bf | 5,206 | py | Python | mars/services/subtask/tests/test_service.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | 1 | 2022-02-24T08:39:26.000Z | 2022-02-24T08:39:26.000Z | mars/services/subtask/tests/test_service.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | null | null | null | mars/services/subtask/tests/test_service.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import time
import numpy as np
import pytest
from .... import oscar as mo
from .... import tensor as mt
from .... import remote as mr
from ....core.graph import TileableGraph, TileableGraphBuilder, ChunkGraphBuilder
from ....resource import Resource
from ....utils import Timer
from ... import start_services, stop_services, NodeRole
from ...meta import MetaAPI
from ...session import SessionAPI
from ...storage import MockStorageAPI
from ...task import new_task_id
from ...task.supervisor.manager import TaskManagerActor
from .. import SubtaskAPI, Subtask, SubtaskResult
class FakeTaskManager(TaskManagerActor):
    """Task manager stub that silently drops subtask results so the test controls scheduling."""

    def set_subtask_result(self, subtask_result: SubtaskResult):
        # Intentionally a no-op: the real manager would record the result.
        return
def _gen_subtask(t, session_id):
    """Build a Subtask wrapping the chunk graph tiled from tileable ``t``."""
    tileable_graph = TileableGraph([t.data])
    next(TileableGraphBuilder(tileable_graph).build())
    chunk_graph = next(ChunkGraphBuilder(tileable_graph, fuse_enabled=False).build())
    return Subtask(new_task_id(), session_id, new_task_id(), chunk_graph)
@pytest.fixture
async def actor_pools():
    """Start a supervisor pool and a worker pool, yield both, stop them on teardown."""

    async def start_pool(is_worker: bool):
        if is_worker:
            # Worker pools get a main process plus two "numa-0" slot processes.
            kw = dict(
                n_process=2,
                labels=["main"] + ["numa-0"] * 2,
                subprocess_start_method="spawn",
            )
        else:
            # Supervisor pool runs in the main process only.
            kw = dict(n_process=0, subprocess_start_method="spawn")
        pool = await mo.create_actor_pool("127.0.0.1", **kw)
        await pool.start()
        return pool

    sv_pool, worker_pool = await asyncio.gather(start_pool(False), start_pool(True))
    yield sv_pool, worker_pool
    # Teardown: stop both pools after the test finishes.
    await asyncio.gather(sv_pool.stop(), worker_pool.stop())
@pytest.mark.asyncio
async def test_subtask_service(actor_pools):
    """End-to-end check of the subtask service: run a subtask, verify its
    stored result and meta, then cancel a long-running subtask in time."""
    sv_pool, worker_pool = actor_pools
    config = {
        "services": [
            "cluster",
            "session",
            "meta",
            "lifecycle",
            "scheduling",
            "subtask",
            "task",
            "mutable",
        ],
        "cluster": {
            "backend": "fixed",
            "lookup_address": sv_pool.external_address,
            "resource": {"numa-0": Resource(num_cpus=2)},
        },
        "meta": {"store": "dict"},
        "scheduling": {},
        "subtask": {},
    }
    await start_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
    await start_services(NodeRole.WORKER, config, address=worker_pool.external_address)

    session_id = "test_session"
    session_api = await SessionAPI.create(sv_pool.external_address)
    await session_api.create_session(session_id)
    # Replace the real task manager with the fake one so subtask results
    # reported back to the supervisor are ignored.
    ref = await mo.actor_ref(
        FakeTaskManager.gen_uid(session_id), address=sv_pool.external_address
    )
    await mo.destroy_actor(ref)
    await mo.create_actor(
        FakeTaskManager,
        session_id,
        uid=FakeTaskManager.gen_uid(session_id),
        address=sv_pool.external_address,
    )

    subtask_api = await SubtaskAPI.create(worker_pool.external_address)
    # create mock meta and storage APIs
    meta_api = await MetaAPI.create(session_id, sv_pool.external_address)
    storage_api = await MockStorageAPI.create(session_id, worker_pool.external_address)

    a = mt.ones((10, 10), chunk_size=10)
    b = a + 1

    subtask = _gen_subtask(b, session_id)
    assert "TensorAdd" in repr(subtask)
    await subtask_api.run_subtask_in_slot("numa-0", 0, subtask)

    # check storage: the computed chunk should equal ones + 1
    expected = np.ones((10, 10)) + 1
    result_key = subtask.chunk_graph.results[0].key
    result = await storage_api.get(result_key)
    np.testing.assert_array_equal(expected, result)

    # check meta: the chunk must be registered on the worker's numa-0 band
    chunk_meta = await meta_api.get_chunk_meta(result_key)
    assert chunk_meta is not None
    assert chunk_meta["bands"][0] == (worker_pool.external_address, "numa-0")

    def sleep(timeout: int):
        time.sleep(timeout)
        return timeout

    b = mr.spawn(sleep, 1)

    subtask2 = _gen_subtask(b, session_id)
    asyncio.create_task(subtask_api.run_subtask_in_slot("numa-0", 0, subtask2))
    await asyncio.sleep(0.2)
    with Timer() as timer:
        # normal cancel by cancel asyncio Task
        await asyncio.wait_for(
            subtask_api.cancel_subtask_in_slot("numa-0", 0), timeout=2
        )

    # need 1 sec to reach timeout, then killing actor and wait for auto recovering
    # the time would not be over 5 sec
    assert timer.duration < 2

    await MockStorageAPI.cleanup(worker_pool.external_address)
    await stop_services(NodeRole.WORKER, config, address=worker_pool.external_address)
    await stop_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
| 33.159236 | 87 | 0.684595 |
import asyncio
import time
import numpy as np
import pytest
from .... import oscar as mo
from .... import tensor as mt
from .... import remote as mr
from ....core.graph import TileableGraph, TileableGraphBuilder, ChunkGraphBuilder
from ....resource import Resource
from ....utils import Timer
from ... import start_services, stop_services, NodeRole
from ...meta import MetaAPI
from ...session import SessionAPI
from ...storage import MockStorageAPI
from ...task import new_task_id
from ...task.supervisor.manager import TaskManagerActor
from .. import SubtaskAPI, Subtask, SubtaskResult
class FakeTaskManager(TaskManagerActor):
    """Task manager stub that silently drops subtask results so the test controls scheduling."""

    def set_subtask_result(self, subtask_result: SubtaskResult):
        # Intentionally a no-op: the real manager would record the result.
        return
def _gen_subtask(t, session_id):
    """Build a Subtask wrapping the chunk graph tiled from tileable ``t``."""
    tileable_graph = TileableGraph([t.data])
    next(TileableGraphBuilder(tileable_graph).build())
    chunk_graph = next(ChunkGraphBuilder(tileable_graph, fuse_enabled=False).build())
    return Subtask(new_task_id(), session_id, new_task_id(), chunk_graph)
@pytest.fixture
async def actor_pools():
    """Start a supervisor pool and a worker pool, yield both, stop them on teardown."""

    async def start_pool(is_worker: bool):
        if is_worker:
            # Worker pools get a main process plus two "numa-0" slot processes.
            kw = dict(
                n_process=2,
                labels=["main"] + ["numa-0"] * 2,
                subprocess_start_method="spawn",
            )
        else:
            # Supervisor pool runs in the main process only.
            kw = dict(n_process=0, subprocess_start_method="spawn")
        pool = await mo.create_actor_pool("127.0.0.1", **kw)
        await pool.start()
        return pool

    sv_pool, worker_pool = await asyncio.gather(start_pool(False), start_pool(True))
    yield sv_pool, worker_pool
    # Teardown: stop both pools after the test finishes.
    await asyncio.gather(sv_pool.stop(), worker_pool.stop())
@pytest.mark.asyncio
async def test_subtask_service(actor_pools):
    """End-to-end check of the subtask service: run a subtask, verify its
    stored result and meta, then cancel a long-running subtask in time."""
    sv_pool, worker_pool = actor_pools
    config = {
        "services": [
            "cluster",
            "session",
            "meta",
            "lifecycle",
            "scheduling",
            "subtask",
            "task",
            "mutable",
        ],
        "cluster": {
            "backend": "fixed",
            "lookup_address": sv_pool.external_address,
            "resource": {"numa-0": Resource(num_cpus=2)},
        },
        "meta": {"store": "dict"},
        "scheduling": {},
        "subtask": {},
    }
    await start_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
    await start_services(NodeRole.WORKER, config, address=worker_pool.external_address)

    session_id = "test_session"
    session_api = await SessionAPI.create(sv_pool.external_address)
    await session_api.create_session(session_id)
    # Replace the real task manager with the fake one so subtask results
    # reported back to the supervisor are ignored.
    ref = await mo.actor_ref(
        FakeTaskManager.gen_uid(session_id), address=sv_pool.external_address
    )
    await mo.destroy_actor(ref)
    await mo.create_actor(
        FakeTaskManager,
        session_id,
        uid=FakeTaskManager.gen_uid(session_id),
        address=sv_pool.external_address,
    )

    subtask_api = await SubtaskAPI.create(worker_pool.external_address)
    # Create mock meta and storage APIs.
    meta_api = await MetaAPI.create(session_id, sv_pool.external_address)
    storage_api = await MockStorageAPI.create(session_id, worker_pool.external_address)

    a = mt.ones((10, 10), chunk_size=10)
    b = a + 1

    subtask = _gen_subtask(b, session_id)
    assert "TensorAdd" in repr(subtask)
    await subtask_api.run_subtask_in_slot("numa-0", 0, subtask)

    # Check storage: the computed chunk should equal ones + 1.
    expected = np.ones((10, 10)) + 1
    result_key = subtask.chunk_graph.results[0].key
    result = await storage_api.get(result_key)
    np.testing.assert_array_equal(expected, result)

    # Check meta: the chunk must be registered on the worker's numa-0 band.
    chunk_meta = await meta_api.get_chunk_meta(result_key)
    assert chunk_meta is not None
    assert chunk_meta["bands"][0] == (worker_pool.external_address, "numa-0")

    def sleep(timeout: int):
        time.sleep(timeout)
        return timeout

    b = mr.spawn(sleep, 1)

    subtask2 = _gen_subtask(b, session_id)
    asyncio.create_task(subtask_api.run_subtask_in_slot("numa-0", 0, subtask2))
    await asyncio.sleep(0.2)
    with Timer() as timer:
        # Normal cancel by cancelling the asyncio task running the subtask.
        await asyncio.wait_for(
            subtask_api.cancel_subtask_in_slot("numa-0", 0), timeout=2
        )
    # Cancellation must complete well before the 2 second timeout.
    assert timer.duration < 2

    await MockStorageAPI.cleanup(worker_pool.external_address)
    await stop_services(NodeRole.WORKER, config, address=worker_pool.external_address)
    await stop_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
| true | true |
f73324757c8fe86199a89fe5528fe1ae141249f5 | 2,766 | py | Python | tools/Bitcoin Parser/blockchain_parser/tests/test_output.py | SimeoW/bitcoin_researcher | 3644405f06c8b16a437513e8c02f0f061b91be2e | [
"MIT"
] | 1 | 2020-02-15T21:44:04.000Z | 2020-02-15T21:44:04.000Z | tools/Bitcoin Parser/blockchain_parser/tests/test_output.py | SimeoW/bitcoin_researcher | 3644405f06c8b16a437513e8c02f0f061b91be2e | [
"MIT"
] | null | null | null | tools/Bitcoin Parser/blockchain_parser/tests/test_output.py | SimeoW/bitcoin_researcher | 3644405f06c8b16a437513e8c02f0f061b91be2e | [
"MIT"
] | null | null | null | # Copyright (C) 2015-2016 The bitcoin-blockchain-parser developers
#
# This file is part of bitcoin-blockchain-parser.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of bitcoin-blockchain-parser, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
import unittest
from binascii import a2b_hex
from blockchain_parser.output import Output
class TestOutput(unittest.TestCase):
    """Checks Output.from_hex classification for each standard script type."""

    def test_pubkeyhash_from_hex(self):
        # P2PKH output: 1 satoshi paid to a hash160 address.
        raw_output = "01000000000000001976a91432ba382cf668657bae15ee0a97fa87" \
                     "f12e1bc89f88ac"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_pubkeyhash())
        self.assertEqual("pubkeyhash", output.type)
        self.assertEqual(1, output.value)
        self.assertEqual(1, len(output.addresses))

    def test_pubkey_from_hex(self):
        # Bare pubkey (pay-to-pubkey) output.
        raw_output = "0100000000000000232102c0993f639534d348e1dca30566491e6c" \
                     "b11c14afa13ec244c05396a9839aeb17ac"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_pubkey())
        self.assertEqual("pubkey", output.type)
        self.assertEqual(1, len(output.addresses))

    def test_p2sh_from_hex(self):
        # Pay-to-script-hash output.
        raw_output = "010000000000000017a91471c5c3727fac8dbace94bd38cf8ac16a" \
                     "034a794787"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_p2sh())
        self.assertEqual("p2sh", output.type)
        self.assertEqual(1, len(output.addresses))

    def test_return_from_hex(self):
        # OP_RETURN data output: unspendable, so no addresses.
        raw_output = "01000000000000002a6a2846610000000024958857cc0da391b" \
                     "7b2bf61bcba59bb9ee438873f902c25da4c079e53d0c55fe991"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_return())
        self.assertEqual("OP_RETURN", output.type)
        self.assertEqual(0, len(output.addresses))

    def test_multisig_from_hex(self):
        # Bare 1-of-2 multisig output: both pubkeys yield addresses.
        raw_output = "0100000000000000475121025cd452979d4d5e928d47c3581bb287" \
                     "41b2cf9c54185e7d563a663707b00d956d2102ff99d00aa9d195b9" \
                     "3732254def8bfe80a786a7973ef8e63afd8d2a65e97b6c3b52ae"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_multisig())
        self.assertEqual("multisig", output.type)
        self.assertEqual(2, len(output.addresses))

    def test_unknown_from_hex(self):
        # Non-standard script (bare OP_TRUE) is classified as unknown.
        raw_output = "01000000000000000151"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertEqual("unknown", output.type)
        self.assertEqual(0, len(output.addresses))
| 41.283582 | 79 | 0.707158 |
import unittest
from binascii import a2b_hex
from blockchain_parser.output import Output
class TestOutput(unittest.TestCase):
    """Checks Output.from_hex classification for each standard script type."""

    def test_pubkeyhash_from_hex(self):
        # P2PKH output: 1 satoshi paid to a hash160 address.
        raw_output = "01000000000000001976a91432ba382cf668657bae15ee0a97fa87" \
                     "f12e1bc89f88ac"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_pubkeyhash())
        self.assertEqual("pubkeyhash", output.type)
        self.assertEqual(1, output.value)
        self.assertEqual(1, len(output.addresses))

    def test_pubkey_from_hex(self):
        # Bare pubkey (pay-to-pubkey) output.
        raw_output = "0100000000000000232102c0993f639534d348e1dca30566491e6c" \
                     "b11c14afa13ec244c05396a9839aeb17ac"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_pubkey())
        self.assertEqual("pubkey", output.type)
        self.assertEqual(1, len(output.addresses))

    def test_p2sh_from_hex(self):
        # Pay-to-script-hash output.
        raw_output = "010000000000000017a91471c5c3727fac8dbace94bd38cf8ac16a" \
                     "034a794787"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_p2sh())
        self.assertEqual("p2sh", output.type)
        self.assertEqual(1, len(output.addresses))

    def test_return_from_hex(self):
        # OP_RETURN data output: unspendable, so no addresses.
        raw_output = "01000000000000002a6a2846610000000024958857cc0da391b" \
                     "7b2bf61bcba59bb9ee438873f902c25da4c079e53d0c55fe991"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_return())
        self.assertEqual("OP_RETURN", output.type)
        self.assertEqual(0, len(output.addresses))

    def test_multisig_from_hex(self):
        # Bare 1-of-2 multisig output: both pubkeys yield addresses.
        raw_output = "0100000000000000475121025cd452979d4d5e928d47c3581bb287" \
                     "41b2cf9c54185e7d563a663707b00d956d2102ff99d00aa9d195b9" \
                     "3732254def8bfe80a786a7973ef8e63afd8d2a65e97b6c3b52ae"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertTrue(output.is_multisig())
        self.assertEqual("multisig", output.type)
        self.assertEqual(2, len(output.addresses))

    def test_unknown_from_hex(self):
        # Non-standard script (bare OP_TRUE) is classified as unknown.
        raw_output = "01000000000000000151"
        output = Output.from_hex(a2b_hex(raw_output))
        self.assertEqual("unknown", output.type)
        self.assertEqual(0, len(output.addresses))
| true | true |
f7332501c73d1e51adfaedc1f6ffe078f2e3b8c2 | 8,367 | py | Python | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/compile/deploy_optimizer.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/compile/deploy_optimizer.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/compile/deploy_optimizer.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor, Graph, Node
from nndct_shared.utils import NndctDebugLogger, NndctOption
from typing import List, Dict
from .op_evaluator import Evaluator
from collections import defaultdict
class DevGraphOptimizer(object):
"""Optimze graph for device computation
"""
  def __init__(self, nndct_graph):
    """Operate on a deep copy of *nndct_graph* so the caller's graph is untouched.

    Also registers the evaluation functions used during constant folding,
    keyed by the op types they can evaluate.
    """
    self._dev_graph = copy.deepcopy(nndct_graph)
    self._evalute_func_map = {
        NNDCT_OP.SHAPE: Evaluator.shape,
        NNDCT_OP.CAST: Evaluator.cast,
        NNDCT_OP.INT: Evaluator.int,
        NNDCT_OP.SCALAR_MUL: Evaluator.mul,
        NNDCT_OP.TENSOR: Evaluator.tensor,
        NNDCT_OP.FLOOR: Evaluator.floor,
        NNDCT_OP.DIV: Evaluator.elemwise_div,
        NNDCT_OP.FLOOR_DIV: Evaluator.floor_div,
        NNDCT_OP.ADD: Evaluator.add,
        NNDCT_OP.SCALAR_ADD: Evaluator.add
    }
# self._redundant_ops = [NNDCT_OP.CONTIGUOUS]
# def freeze_graph(self):
# self._infer_tensor_layout()
# self._strip_redundant_ops()
# self._constant_folding()
# if NndctOption.nndct_parse_debug.value >= 3:
# NndctDebugLogger.write(f"\nfrozen dev graph:\n{self._dev_graph}")
  def strip_redundant_ops(self):
    """Remove ops that are no-ops for deployment or unsupported in xmodel."""
    # remove unsupported op in xmodel
    redundant_op_types = [NNDCT_OP.CONTIGUOUS]
    self._dev_graph.remove_node_by_types(redundant_op_types)

    # remove redundant permute op: an identity order [0, 1, 2, 3] changes nothing
    permute_nodes = [node for node in self._dev_graph.nodes if node.op.type == NNDCT_OP.PERMUTE]
    for permute in permute_nodes:
      if permute.node_attr(permute.op.AttrName.ORDER) == [0, 1, 2, 3]:
        self._dev_graph.remove_node(permute)
def constant_folding(self):
folding_nodes = set()
for node in self._dev_graph.nodes:
if node.in_quant_part is False:
continue
if hasattr(node.op, "AttrName"):
for attr_name in node.op.attrs.keys():
attr_val = node.node_attr(attr_name)
if isinstance(attr_val, list):
for i, val in enumerate(attr_val):
attr_val[i] = self._materialize(node, val, folding_nodes)
else:
attr_val = self._materialize(node, attr_val, folding_nodes)
if node.op.attrs[attr_name].type == list:
attr_val = [attr_val]
node.set_node_attr(attr_name, attr_val)
if folding_nodes:
for node_name in folding_nodes:
self._dev_graph.remove_node_forcely(self._dev_graph.node(node_name))
self._dev_graph.reconnect_nodes()
@staticmethod
def _infer_op_value_immediately(op_type):
return op_type in [NNDCT_OP.SHAPE, NNDCT_OP.CONST]
def _eval_node_value(self, node):
if node.out_tensors[0].data is None:
self._evalute_func_map[node.op.type](node)
def _materialize(self, cur_node, value, folding_nodes):
visited = set()
def dfs(node):
visited.add(node.name)
if self._infer_op_value_immediately(node.op.type):
folding_nodes.add(node.name)
self._eval_node_value(node)
return True
elif node.name in folding_nodes:
self._eval_node_value(node)
return True
elif node.op.type not in self._evalute_func_map:
return False
find_evaluable_op = False
for tensor in node.in_tensors:
if tensor.node and tensor.node.name not in visited: # and tensor.data is None:
find_evaluable_op = dfs(tensor.node)
if find_evaluable_op is False:
break
if find_evaluable_op:
folding_nodes.add(node.name)
self._eval_node_value(node)
return find_evaluable_op
if not isinstance(value, Tensor):
return value
else:
is_evaluable = dfs(value.node)
if is_evaluable:
data = value.node.out_tensors[0].data
cur_node.in_tensors.remove(value)
if not cur_node.in_tensors and cur_node.op.type not in [NNDCT_OP.ZEROS]:
folding_nodes.add(cur_node.name)
# debug
# print(cur_node.name, cur_node.op.type, value.name)
# print(folding_nodes)
return data
else:
return value
def infer_tensor_layout(self):
# TODO: Don't support NHWC in pytorch inference
for node in self._dev_graph.nodes:
if node.op.type == NNDCT_OP.PERMUTE and node.in_tensors[0].ndim == 4:
if node.in_tensors[0].layout is None:
if node.node_attr(node.op.AttrName.ORDER) == [0, 1, 2, 3]:
node.out_tensors[0].layout = Tensor.Layout.NHWC
else:
node.out_tensors[0].layout = node.in_tensors[0].layout
elif node.out_tensors and node.in_tensors:
node.out_tensors[0].layout = node.in_tensors[0].layout
else:
continue
def partition_by_quant_part(self) -> List[Graph]:
if not any([node.op.type == NNDCT_OP.QUANT_STUB for node in self._dev_graph.nodes]):
return [self._dev_graph]
id2nodes = defaultdict(set)
def collect_node_set(node, set_id, visited=None):
# if not node.in_quant_part:
# return
if not visited:
visited = []
if not hasattr(node, "set_id"):
node.set_id = set_id
id2nodes[set_id].add(node)
visited.append(node)
for cn in self._dev_graph.children(node):
if cn not in visited and cn.in_quant_part:
collect_node_set(cn, set_id, visited)
def get_set_id_from_nodeset(nodeset):
return min([node.set_id for node in nodeset])
def partition_check(quant_graphs):
for node in self._dev_graph.nodes:
if node.in_quant_part and all([node not in graph for graph in quant_graphs]):
raise RuntimeError(f"Please check graph partition: the quant node '{node.name}' should be in quant graph.")
elif not node.in_quant_part and any([node in graph for graph in quant_graphs]):
raise RuntimeError(f"Please check graph partition: the non-quant node '{node.name}' included in quant graph.")
set_id = 0
for node in self._dev_graph.nodes:
if node.op.type == NNDCT_OP.QUANT_STUB or (not node.in_nodes and node.in_quant_part):
collect_node_set(node, set_id)
set_id += 1
merged_id2nodes = defaultdict(set)
for nodeset in id2nodes.values():
id = get_set_id_from_nodeset(nodeset)
merged_id2nodes[id].update(nodeset)
quant_dev_graph = []
for graph_id, nodes in merged_id2nodes.items():
subgraph = Graph.create_subgraph_from_nodeset(self._dev_graph, nodes, f"{self._dev_graph.name}_{graph_id}")
quant_dev_graph.append(subgraph)
partition_check(quant_dev_graph)
return quant_dev_graph
@property
def frozen_graph(self):
return self._dev_graph
"""
def partition_by_quant_part(self) -> List[Graph]:
if not any([node.op.type == NNDCT_OP.QUANT_STUB for node in self._dev_graph.nodes]):
return [self._dev_graph]
id2nodes: Dict[int, List[Node]] = defaultdict(list)
count = -1
start_quant = False
for node in self._dev_graph.nodes:
if not node.in_quant_part:
start_quant = False
continue
if node.op.type == NNDCT_OP.QUANT_STUB and start_quant is False:
start_quant = True
count += 1
id2nodes[count].append(node)
elif start_quant:
id2nodes[count].append(node)
quant_dev_graph = []
for graph_id, nodes in id2nodes.items():
graph = Graph(f"{self._dev_graph.name}_{graph_id}")
for node in nodes:
graph.add_node(node)
quant_dev_graph.append(graph)
return quant_dev_graph
"""
| 32.811765 | 123 | 0.659018 |
import copy
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor, Graph, Node
from nndct_shared.utils import NndctDebugLogger, NndctOption
from typing import List, Dict
from .op_evaluator import Evaluator
from collections import defaultdict
class DevGraphOptimizer(object):
  """Optimize a graph for device computation.

  NOTE(review): this is a comment-stripped duplicate of the
  DevGraphOptimizer class defined earlier in this file; if one copy is
  changed the other must be kept in sync (or the duplicate removed).
  """
  def __init__(self, nndct_graph):
    # Deep-copy so the caller's graph is never mutated.
    self._dev_graph = copy.deepcopy(nndct_graph)
    # Op type -> evaluator used during constant folding.
    self._evalute_func_map = {
        NNDCT_OP.SHAPE: Evaluator.shape,
        NNDCT_OP.CAST: Evaluator.cast,
        NNDCT_OP.INT: Evaluator.int,
        NNDCT_OP.SCALAR_MUL: Evaluator.mul,
        NNDCT_OP.TENSOR: Evaluator.tensor,
        NNDCT_OP.FLOOR: Evaluator.floor,
        NNDCT_OP.DIV: Evaluator.elemwise_div,
        NNDCT_OP.FLOOR_DIV: Evaluator.floor_div,
        NNDCT_OP.ADD: Evaluator.add,
        NNDCT_OP.SCALAR_ADD: Evaluator.add
    }
  def strip_redundant_ops(self):
    """Remove ops that carry no meaning on the device (CONTIGUOUS, identity PERMUTE)."""
    redundant_op_types = [NNDCT_OP.CONTIGUOUS]
    self._dev_graph.remove_node_by_types(redundant_op_types)
    permute_nodes = [node for node in self._dev_graph.nodes if node.op.type == NNDCT_OP.PERMUTE]
    for permute in permute_nodes:
      # An identity permutation is a no-op.
      if permute.node_attr(permute.op.AttrName.ORDER) == [0, 1, 2, 3]:
        self._dev_graph.remove_node(permute)
  def constant_folding(self):
    """Fold statically-evaluable attribute values into constants and
    remove the producing nodes from the graph."""
    folding_nodes = set()
    for node in self._dev_graph.nodes:
      # Only fold inside the quantized part of the graph.
      if node.in_quant_part is False:
        continue
      if hasattr(node.op, "AttrName"):
        for attr_name in node.op.attrs.keys():
          attr_val = node.node_attr(attr_name)
          if isinstance(attr_val, list):
            for i, val in enumerate(attr_val):
              attr_val[i] = self._materialize(node, val, folding_nodes)
          else:
            attr_val = self._materialize(node, attr_val, folding_nodes)
          # Attributes declared as lists must stay lists after folding.
          if node.op.attrs[attr_name].type == list:
            attr_val = [attr_val]
          node.set_node_attr(attr_name, attr_val)
    if folding_nodes:
      for node_name in folding_nodes:
        self._dev_graph.remove_node_forcely(self._dev_graph.node(node_name))
      self._dev_graph.reconnect_nodes()
  @staticmethod
  def _infer_op_value_immediately(op_type):
    # SHAPE and CONST outputs are known without looking at inputs.
    return op_type in [NNDCT_OP.SHAPE, NNDCT_OP.CONST]
  def _eval_node_value(self, node):
    # Compute and cache the node's output data unless already evaluated.
    if node.out_tensors[0].data is None:
      self._evalute_func_map[node.op.type](node)
  def _materialize(self, cur_node, value, folding_nodes):
    """Return concrete data for a Tensor-valued attribute when its
    producing subgraph is statically evaluable; otherwise return
    ``value`` unchanged."""
    visited = set()
    def dfs(node):
      visited.add(node.name)
      if self._infer_op_value_immediately(node.op.type):
        folding_nodes.add(node.name)
        self._eval_node_value(node)
        return True
      elif node.name in folding_nodes:
        # Already proven evaluable by a previous traversal.
        self._eval_node_value(node)
        return True
      elif node.op.type not in self._evalute_func_map:
        return False
      find_evaluable_op = False
      for tensor in node.in_tensors:
        if tensor.node and tensor.node.name not in visited:
          find_evaluable_op = dfs(tensor.node)
          if find_evaluable_op is False:
            break
      if find_evaluable_op:
        folding_nodes.add(node.name)
        self._eval_node_value(node)
      return find_evaluable_op
    if not isinstance(value, Tensor):
      return value
    else:
      is_evaluable = dfs(value.node)
      if is_evaluable:
        data = value.node.out_tensors[0].data
        cur_node.in_tensors.remove(value)
        # A node left with no inputs is itself foldable (except ZEROS).
        if not cur_node.in_tensors and cur_node.op.type not in [NNDCT_OP.ZEROS]:
          folding_nodes.add(cur_node.name)
        return data
      else:
        return value
  def infer_tensor_layout(self):
    """Propagate tensor layout through the graph (4-D PERMUTE may set NHWC)."""
    for node in self._dev_graph.nodes:
      if node.op.type == NNDCT_OP.PERMUTE and node.in_tensors[0].ndim == 4:
        if node.in_tensors[0].layout is None:
          if node.node_attr(node.op.AttrName.ORDER) == [0, 1, 2, 3]:
            node.out_tensors[0].layout = Tensor.Layout.NHWC
        else:
          node.out_tensors[0].layout = node.in_tensors[0].layout
      elif node.out_tensors and node.in_tensors:
        node.out_tensors[0].layout = node.in_tensors[0].layout
      else:
        continue
  def partition_by_quant_part(self) -> List[Graph]:
    """Split the graph into one sub-graph per quantized region seeded at
    each QUANT_STUB (or input-less quantized node); overlapping regions
    are merged under the smallest seed id."""
    if not any([node.op.type == NNDCT_OP.QUANT_STUB for node in self._dev_graph.nodes]):
      return [self._dev_graph]
    id2nodes = defaultdict(set)
    def collect_node_set(node, set_id, visited=None):
      # if not node.in_quant_part:
      #   return
      if not visited:
        visited = []
      if not hasattr(node, "set_id"):
        node.set_id = set_id
      id2nodes[set_id].add(node)
      visited.append(node)
      for cn in self._dev_graph.children(node):
        if cn not in visited and cn.in_quant_part:
          collect_node_set(cn, set_id, visited)
    def get_set_id_from_nodeset(nodeset):
      return min([node.set_id for node in nodeset])
    def partition_check(quant_graphs):
      # Sanity check: sub-graphs must cover exactly the quantized nodes.
      for node in self._dev_graph.nodes:
        if node.in_quant_part and all([node not in graph for graph in quant_graphs]):
          raise RuntimeError(f"Please check graph partition: the quant node '{node.name}' should be in quant graph.")
        elif not node.in_quant_part and any([node in graph for graph in quant_graphs]):
          raise RuntimeError(f"Please check graph partition: the non-quant node '{node.name}' included in quant graph.")
    set_id = 0
    for node in self._dev_graph.nodes:
      if node.op.type == NNDCT_OP.QUANT_STUB or (not node.in_nodes and node.in_quant_part):
        collect_node_set(node, set_id)
        set_id += 1
    merged_id2nodes = defaultdict(set)
    for nodeset in id2nodes.values():
      id = get_set_id_from_nodeset(nodeset)
      merged_id2nodes[id].update(nodeset)
    quant_dev_graph = []
    for graph_id, nodes in merged_id2nodes.items():
      subgraph = Graph.create_subgraph_from_nodeset(self._dev_graph, nodes, f"{self._dev_graph.name}_{graph_id}")
      quant_dev_graph.append(subgraph)
    partition_check(quant_dev_graph)
    return quant_dev_graph
  @property
  def frozen_graph(self):
    # The optimized (device-ready) graph after the passes have been run.
    return self._dev_graph
| true | true |
f733273aec9a49b209744ed817dbfc31753f022d | 27,671 | py | Python | elit/components/srl/span_rank/span_ranking_srl_model.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | 4 | 2021-09-17T15:23:31.000Z | 2022-02-28T10:18:04.000Z | elit/components/srl/span_rank/span_ranking_srl_model.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | elit/components/srl/span_rank/span_ranking_srl_model.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from alnlp.modules.feedforward import FeedForward
from alnlp.modules.time_distributed import TimeDistributed
from .highway_variational_lstm import *
import torch
from alnlp.modules import util
from ...parsers.biaffine.biaffine import Biaffine
def initializer_1d(input_tensor, initializer):
    """Apply a (typically 2-D) weight initializer to a 1-D tensor.

    Initializers such as ``xavier_uniform_`` reject vectors, so the tensor
    is temporarily viewed as an ``(n, 1)`` column matrix, initialized, and
    flattened back to a vector before being returned.
    """
    assert len(input_tensor.size()) == 1
    as_column = input_tensor.view(-1, 1)
    initialized = initializer(as_column)
    return initialized.view(-1)
class SpanRankingSRLDecoder(nn.Module):
    """Span-ranking decoder for semantic role labeling.

    Enumerates every candidate argument span (up to ``max_arg_width``) and
    every candidate predicate token over a contextualized sentence encoding,
    prunes both lists with unary FFNN scores, then scores each
    (argument, predicate) pair over the label space. Label index 0 is the
    null (no-relation) class and receives a fixed dummy score of 0.
    """
    def __init__(self, context_layer_output_dim, label_space_size, config) -> None:
        super().__init__()
        self.config = config
        self.label_space_size = label_space_size
        self.dropout = float(config.dropout)
        self.use_gold_predicates = config.use_gold_predicates
        # span width feature embedding
        self.span_width_embedding = nn.Embedding(self.config.max_arg_width, self.config.span_width_feature_size)
        # self.context_projective_layer = nn.Linear(2 * self.lstm_hidden_size, self.config.num_attention_heads)
        # span scores
        # Span representation = [start emb; end emb; head (mean) emb] plus the width feature.
        self.span_emb_size = 3 * context_layer_output_dim + self.config.span_width_feature_size
        self.arg_unary_score_layers = nn.ModuleList([nn.Linear(self.span_emb_size, self.config.ffnn_size) if i == 0
                                                     else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
                                                     in range(self.config.ffnn_depth)])  # [,150]
        self.arg_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        self.arg_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
        # predicate scores
        self.pred_unary_score_layers = nn.ModuleList(
            [nn.Linear(context_layer_output_dim, self.config.ffnn_size) if i == 0
             else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
             in range(self.config.ffnn_depth)])  # [,150]
        self.pred_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        self.pred_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
        # srl scores
        self.srl_unary_score_input_size = self.span_emb_size + context_layer_output_dim
        self.srl_unary_score_layers = nn.ModuleList([nn.Linear(self.srl_unary_score_input_size, self.config.ffnn_size)
                                                     if i == 0 else nn.Linear(self.config.ffnn_size,
                                                                              self.config.ffnn_size)
                                                     for i in range(self.config.ffnn_depth)])
        self.srl_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        # Only label_space_size - 1 real labels are scored; index 0 is the dummy class.
        self.srl_unary_score_projection = nn.Linear(self.config.ffnn_size, self.label_space_size - 1)
        if config.use_biaffine:
            self.predicate_scale = TimeDistributed(FeedForward(context_layer_output_dim, 1, self.span_emb_size, 'ReLU'))
            self.biaffine = Biaffine(self.span_emb_size, self.label_space_size - 1)
        self.loss_reduction = config.loss_reduction
        self.reset_parameters()
    def reset_parameters(self):
        """Xavier-initialize every linear layer and the width embedding."""
        init.xavier_uniform_(self.span_width_embedding.weight)
        # init.xavier_uniform_(self.context_projective_layer.weight)
        # initializer_1d(self.context_projective_layer.bias, init.xavier_uniform_)
        for layer in self.arg_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.arg_unary_score_projection.weight)
        initializer_1d(self.arg_unary_score_projection.bias, init.xavier_uniform_)
        for layer in self.pred_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.pred_unary_score_projection.weight)
        initializer_1d(self.pred_unary_score_projection.bias, init.xavier_uniform_)
        for layer in self.srl_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.srl_unary_score_projection.weight)
        initializer_1d(self.srl_unary_score_projection.bias, init.xavier_uniform_)
        return None
    def forward(self, hidden_states, batch, mask=None):
        """Unpack the batch and run the full span-ranking decode."""
        gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = SpanRankingSRLModel.unpack(
            batch, mask=mask, training=self.training)
        return self.decode(hidden_states, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
                           gold_predicates)
    @staticmethod
    def get_candidate_spans(sent_lengths: torch.Tensor, max_sent_length, max_arg_width):
        """Enumerate all spans of width 1..max_arg_width per sentence.

        Returns (starts, ends, mask), each [num_sentences,
        max_sent_length * max_arg_width]; the mask marks spans whose end
        falls inside the sentence. Out-of-range spans are zeroed.
        """
        num_sentences = len(sent_lengths)
        device = sent_lengths.device
        candidate_starts = torch.arange(0, max_sent_length, device=device).expand(num_sentences, max_arg_width, -1)
        candidate_width = torch.arange(0, max_arg_width, device=device).view(1, -1, 1)
        candidate_ends = candidate_starts + candidate_width
        candidate_starts = candidate_starts.contiguous().view(num_sentences, max_sent_length * max_arg_width)
        candidate_ends = candidate_ends.contiguous().view(num_sentences, max_sent_length * max_arg_width)
        actual_sent_lengths = sent_lengths.view(-1, 1).expand(-1, max_sent_length * max_arg_width)
        candidate_mask = candidate_ends < actual_sent_lengths
        # Zero out invalid positions so downstream gathers stay in range.
        candidate_starts = candidate_starts * candidate_mask
        candidate_ends = candidate_ends * candidate_mask
        return candidate_starts, candidate_ends, candidate_mask
    @staticmethod
    def exclusive_cumsum(input: torch.Tensor, exclusive=True):
        """Exclusive cumulative sum of sentence lengths.

        Used to turn per-sentence token offsets into batch-flattened word
        offsets: result[i] = sum(input[:i]).

        Args:
            input: 1-D tensor of sentence lengths.
            exclusive: must be True; the last length is dropped before the
                cumsum so each row gets the offset of its first word.

        Returns:
            Column tensor [num_sentences, 1] of word offsets.
        """
        assert exclusive is True
        if exclusive is True:
            exclusive_sent_lengths = input.new_zeros(1, dtype=torch.long)
            result = torch.cumsum(torch.cat([exclusive_sent_lengths, input], 0)[:-1], 0).view(-1, 1)
        else:
            result = torch.cumsum(input, 0).view(-1, 1)
        return result
    def flatten_emb(self, emb):
        # [num_sentences, max_len, dim] -> [num_sentences * max_len, dim]
        num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
        assert len(emb.size()) == 3
        flatted_emb = emb.contiguous().view(num_sentences * max_sentence_length, -1)
        return flatted_emb
    def flatten_emb_in_sentence(self, emb, batch_sentences_mask):
        # Flatten and keep only positions selected by the sentence mask
        # (i.e. drop padding rows).
        num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
        flatted_emb = self.flatten_emb(emb)
        return flatted_emb[batch_sentences_mask.reshape(num_sentences * max_sentence_length)]
    def get_span_emb(self, flatted_context_emb, flatted_candidate_starts, flatted_candidate_ends,
                     config, dropout=0.0):
        """Build span embeddings: [start; end; width-feature; mean-of-tokens].

        All index tensors are batch-flattened word indices into
        ``flatted_context_emb``. Returns (span_emb, head_scores=None,
        span_text_emb, span_indices, span_indices_mask).
        """
        batch_word_num = flatted_context_emb.size()[0]
        # gather slices from embeddings according to indices
        span_start_emb = flatted_context_emb[flatted_candidate_starts]
        span_end_emb = flatted_context_emb[flatted_candidate_ends]
        span_emb_feature_list = [span_start_emb, span_end_emb]  # store the span vector representations for span rep.
        span_width = 1 + flatted_candidate_ends - flatted_candidate_starts  # [num_spans], generate the span width
        max_arg_width = config.max_arg_width
        # get the span width feature emb
        span_width_index = span_width - 1
        span_width_emb = self.span_width_embedding(span_width_index)
        span_width_emb = F.dropout(span_width_emb, dropout, self.training)
        span_emb_feature_list.append(span_width_emb)
        """head features"""
        cpu_flatted_candidte_starts = flatted_candidate_starts
        span_indices = torch.arange(0, max_arg_width, device=flatted_context_emb.device).view(1, -1) + \
                       cpu_flatted_candidte_starts.view(-1, 1)  # For all the i, where i in [begin, ..i, end] for span
        # reset the position index to the batch_word_num index with index - 1
        span_indices = torch.clamp(span_indices, max=batch_word_num - 1)
        num_spans, spans_width = span_indices.size()[0], span_indices.size()[1]
        flatted_span_indices = span_indices.view(-1)  # so Huge!!!, column is the span?
        # if torch.cuda.is_available():
        flatted_span_indices = flatted_span_indices
        span_text_emb = flatted_context_emb.index_select(0, flatted_span_indices).view(num_spans, spans_width, -1)
        span_indices_mask = util.lengths_to_mask(span_width, max_len=max_arg_width)
        # project context output to num head
        # head_scores = self.context_projective_layer.forward(flatted_context_emb)
        # get span attention
        # span_attention = head_scores.index_select(0, flatted_span_indices).view(num_spans, spans_width)
        # span_attention = torch.add(span_attention, expanded_span_indices_log_mask).unsqueeze(2)  # control the span len
        # span_attention = F.softmax(span_attention, dim=1)
        # Mean pooling over the (masked) tokens stands in for attention-based
        # head selection, which is commented out above.
        span_text_emb = span_text_emb * span_indices_mask.unsqueeze(2).expand(-1, -1, span_text_emb.size()[-1])
        span_head_emb = torch.mean(span_text_emb, 1)
        span_emb_feature_list.append(span_head_emb)
        span_emb = torch.cat(span_emb_feature_list, 1)
        return span_emb, None, span_text_emb, span_indices, span_indices_mask
    def get_arg_unary_scores(self, span_emb):
        """Compute span score with FFNN(span embedding).

        Args:
            span_emb: tensor of [num_spans, span_emb_size].

        Returns:
            Unary argument scores, one per span.
        """
        input = span_emb
        for i, ffnn in enumerate(self.arg_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.arg_dropout_layers[i].forward(input)
        output = self.arg_unary_score_projection.forward(input)
        return output
    def get_pred_unary_scores(self, span_emb):
        """FFNN unary score for each candidate predicate token."""
        input = span_emb
        for i, ffnn in enumerate(self.pred_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.pred_dropout_layers[i].forward(input)
        output = self.pred_unary_score_projection.forward(input)
        return output
    def extract_spans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,
                      sort_spans, enforce_non_crossing):
        """Extract the per-sentence top-k span indices by score.

        Rows with fewer than the batch-max k are padded by repeating the
        last selected index. ``sort_spans`` and ``enforce_non_crossing``
        are currently unused here.

        Args:
            candidate_scores: [num_sentences, num_spans] scores.
            topk: [num_sentences] number of spans to keep per sentence.

        Returns:
            Indices [num_sentences, max_num_predictions].
        """
        # num_sentences = candidate_scores.size()[0]
        # num_input_spans = candidate_scores.size()[1]
        max_num_output_spans = int(torch.max(topk))
        indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)]
        output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1])
                                      for item in indices]
        output_span_indices_tensor = torch.stack(output_span_indices_tensor)
        return output_span_indices_tensor
    def batch_index_select(self, emb, indices):
        # Select per-sentence rows from a [num_sentences, max_len, dim]
        # tensor by converting to batch-flattened indices.
        num_sentences = emb.size()[0]
        max_sent_length = emb.size()[1]
        flatten_emb = self.flatten_emb(emb)
        offset = (torch.arange(0, num_sentences, device=emb.device) * max_sent_length).unsqueeze(1)
        return torch.index_select(flatten_emb, 0, (indices + offset).view(-1)) \
            .view(indices.size()[0], indices.size()[1], -1)
    def get_batch_topk(self, candidate_starts: torch.Tensor, candidate_ends, candidate_scores, topk_ratio, text_len,
                       max_sentence_length, sort_spans=False, enforce_non_crossing=True):
        """Keep the top ceil(len * topk_ratio) candidates per sentence (at least 1)."""
        num_sentences = candidate_starts.size()[0]
        max_sentence_length = candidate_starts.size()[1]
        topk = torch.floor(text_len.to(torch.float) * topk_ratio).to(torch.long)
        topk = torch.max(topk, torch.ones(num_sentences, device=candidate_starts.device, dtype=torch.long))
        # this part should be implemented with C++
        predicted_indices = self.extract_spans(candidate_scores, candidate_starts, candidate_ends, topk,
                                               max_sentence_length, sort_spans, enforce_non_crossing)
        predicted_starts = torch.gather(candidate_starts, 1, predicted_indices)
        predicted_ends = torch.gather(candidate_ends, 1, predicted_indices)
        predicted_scores = torch.gather(candidate_scores, 1, predicted_indices)
        return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices
    def get_dense_span_labels(self, span_starts, span_ends, span_labels, max_sentence_length,
                              span_parents=None):
        """Scatter sparse gold spans into a dense label tensor.

        Builds [num_sentences, max_len, max_len(, max_len)] where entry
        (sent, start, end(, predicate)) holds the gold label id, 0
        elsewhere. Padding spans (label 0) get their start bumped so that
        start > end, keeping them out of any valid cell.
        """
        num_sentences = span_starts.size()[0]
        max_spans_num = span_starts.size()[1]
        # span_starts = span_starts + 1 - (span_labels > 0).to(torch.long)
        span_starts[(span_labels == 0) & (span_starts < max_sentence_length - 1)] += 1  # make start > end
        sentence_indices = torch.arange(0, num_sentences, device=span_starts.device).unsqueeze(1).expand(-1,
                                                                                                         max_spans_num)
        sparse_indices = torch.cat([sentence_indices.unsqueeze(2), span_starts.unsqueeze(2), span_ends.unsqueeze(2)],
                                   dim=2)
        if span_parents is not None:  # semantic span predicate offset
            sparse_indices = torch.cat([sparse_indices, span_parents.unsqueeze(2)], 2)
        rank = 3 if span_parents is None else 4
        dense_labels = torch.sparse.LongTensor(sparse_indices.view(num_sentences * max_spans_num, rank).t(),
                                               span_labels.view(-1),
                                               torch.Size([num_sentences] + [max_sentence_length] * (rank - 1))) \
            .to_dense()
        return dense_labels
    @staticmethod
    def gather_4d(params, indices):
        # Advanced indexing with four parallel index tensors.
        assert len(params.size()) == 4 and len(indices) == 4
        indices_a, indices_b, indices_c, indices_d = indices
        result = params[indices_a, indices_b, indices_c, indices_d]
        return result
    def get_srl_labels(self,
                       arg_starts,
                       arg_ends,
                       predicates,
                       gold_predicates,
                       gold_arg_starts,
                       gold_arg_ends,
                       gold_arg_labels,
                       max_sentence_length
                       ):
        """Look up the gold label for every predicted (argument, predicate) pair.

        Returns [num_sentences, max_num_args, max_num_preds] label ids
        (0 when the pair has no gold relation).
        """
        num_sentences = arg_starts.size()[0]
        max_arg_num = arg_starts.size()[1]
        max_pred_num = predicates.size()[1]
        sentence_indices_2d = torch.arange(0, num_sentences, device=arg_starts.device).unsqueeze(1).unsqueeze(2).expand(
            -1, max_arg_num, max_pred_num)
        expanded_arg_starts = arg_starts.unsqueeze(2).expand(-1, -1, max_pred_num)
        expanded_arg_ends = arg_ends.unsqueeze(2).expand(-1, -1, max_pred_num)
        expanded_predicates = predicates.unsqueeze(1).expand(-1, max_arg_num, -1)
        dense_srl_labels = self.get_dense_span_labels(gold_arg_starts,
                                                      gold_arg_ends,
                                                      gold_arg_labels,
                                                      max_sentence_length, span_parents=gold_predicates)  # ans
        srl_labels = self.gather_4d(dense_srl_labels,
                                    [sentence_indices_2d, expanded_arg_starts, expanded_arg_ends, expanded_predicates])
        return srl_labels
    def get_srl_unary_scores(self, span_emb):
        """FFNN scores over the label space for each (arg, pred) pair embedding."""
        input = span_emb
        for i, ffnn in enumerate(self.srl_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.srl_dropout_layers[i].forward(input)
        output = self.srl_unary_score_projection.forward(input)
        return output
    def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout):
        """Score every (argument, predicate) pair over the label space.

        Pair score = FFNN([arg_emb; pred_emb]) (+ optional biaffine term)
        + arg unary score + pred unary score; a zero dummy-class column is
        prepended, giving [num_sentences, num_args, num_preds, num_labels].
        """
        num_sentences = arg_emb.size()[0]
        num_args = arg_emb.size()[1]  # [batch_size, max_arg_num, arg_emb_size]
        num_preds = pred_emb.size()[1]  # [batch_size, max_pred_num, pred_emb_size]
        unsqueezed_arg_emb = arg_emb.unsqueeze(2)
        unsqueezed_pred_emb = pred_emb.unsqueeze(1)
        expanded_arg_emb = unsqueezed_arg_emb.expand(-1, -1, num_preds, -1)
        expanded_pred_emb = unsqueezed_pred_emb.expand(-1, num_args, -1, -1)
        pair_emb_list = [expanded_arg_emb, expanded_pred_emb]
        pair_emb = torch.cat(pair_emb_list, 3)  # concatenate the argument emb and pre emb
        pair_emb_size = pair_emb.size()[3]
        flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size)
        # get unary scores
        flat_srl_scores = self.get_srl_unary_scores(flat_pair_emb)
        srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, -1)
        if self.config.use_biaffine:
            srl_scores += self.biaffine(arg_emb, self.predicate_scale(pred_emb)).permute([0, 2, 3, 1])
        unsqueezed_arg_scores, unsqueezed_pred_scores = \
            arg_scores.unsqueeze(2).unsqueeze(3), pred_scores.unsqueeze(1).unsqueeze(3)
        srl_scores = srl_scores + unsqueezed_arg_scores + unsqueezed_pred_scores
        dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1], device=arg_emb.device)
        srl_scores = torch.cat([dummy_scores, srl_scores], 3)
        return srl_scores
    def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):
        """Cross-entropy over valid (arg, pred) cells; returns (loss, mask)."""
        srl_loss_mask = self.get_srl_loss_mask(srl_scores, num_predicted_args, num_predicted_preds)
        loss = torch.nn.functional.cross_entropy(srl_scores[srl_loss_mask], srl_labels[srl_loss_mask],
                                                 reduction=self.loss_reduction)
        return loss, srl_loss_mask
    def get_srl_loss_mask(self, srl_scores, num_predicted_args, num_predicted_preds):
        """Boolean mask of cells where both the arg and pred slots are real (not padding)."""
        max_num_arg = srl_scores.size()[1]
        max_num_pred = srl_scores.size()[2]
        # num_predicted_args, 1D tensor; max_num_arg: a int variable means the gold ans's max arg number
        args_mask = util.lengths_to_mask(num_predicted_args, max_num_arg)
        pred_mask = util.lengths_to_mask(num_predicted_preds, max_num_pred)
        srl_loss_mask = args_mask.unsqueeze(2) & pred_mask.unsqueeze(1)
        return srl_loss_mask
    def decode(self, contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
               gold_predicates):
        """Full span-ranking pipeline: enumerate, prune, pair-score.

        When gold labels are given, also computes the training loss; the
        returned dict always carries the pruned candidates, their scores,
        and the pairwise label scores.
        """
        num_sentences, max_sent_length = masks.size()
        device = sent_lengths.device
        """generate candidate spans with argument pruning"""
        # candidate_starts [num_sentences, max_sent_length * max_arg_width]
        candidate_starts, candidate_ends, candidate_mask = self.get_candidate_spans(
            sent_lengths, max_sent_length, self.config.max_arg_width)
        flatted_candidate_mask = candidate_mask.view(-1)
        batch_word_offset = self.exclusive_cumsum(sent_lengths)  # get the word offset in a batch
        # choose the flatted_candidate_starts with the actual existing positions, i.e. exclude the illegal starts
        flatted_candidate_starts = candidate_starts + batch_word_offset
        flatted_candidate_starts = flatted_candidate_starts.view(-1)[flatted_candidate_mask].to(torch.long)
        flatted_candidate_ends = candidate_ends + batch_word_offset
        flatted_candidate_ends = flatted_candidate_ends.view(-1)[flatted_candidate_mask].to(torch.long)
        # flatten the lstm output according to the sentence mask, i.e. exclude the illegal (padding) lstm output
        flatted_context_output = self.flatten_emb_in_sentence(contextualized_embeddings, masks)
        """generate the span embedding"""
        candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.get_span_emb(
            flatted_context_output, flatted_candidate_starts, flatted_candidate_ends,
            self.config, dropout=self.dropout)
        """Get the span ids"""
        candidate_span_number = candidate_span_emb.size()[0]
        max_candidate_spans_num_per_sentence = candidate_mask.size()[1]
        sparse_indices = candidate_mask.nonzero(as_tuple=False)
        sparse_values = torch.arange(0, candidate_span_number, device=device)
        candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values,
                                                      torch.Size([num_sentences,
                                                                  max_candidate_spans_num_per_sentence])).to_dense()
        # log(0) = -inf masks invalid spans out of the top-k selection.
        spans_log_mask = torch.log(candidate_mask.to(torch.float))
        predict_dict = {"candidate_starts": candidate_starts, "candidate_ends": candidate_ends,
                        'candidate_arg_mask': candidate_mask, "head_scores": head_scores}
        """Get unary scores and topk of candidate argument spans."""
        flatted_candidate_arg_scores = self.get_arg_unary_scores(candidate_span_emb)
        candidate_arg_scores = flatted_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \
            .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])
        candidate_arg_scores = candidate_arg_scores + spans_log_mask
        arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = \
            self.get_batch_topk(candidate_starts, candidate_ends, candidate_arg_scores,
                                self.config.argument_ratio, sent_lengths, max_sent_length,
                                sort_spans=False, enforce_non_crossing=False)
        """Get the candidate predicate"""
        candidate_pred_ids = torch.arange(0, max_sent_length, device=device).unsqueeze(0).expand(num_sentences, -1)
        candidate_pred_emb = contextualized_embeddings
        candidate_pred_scores = self.get_pred_unary_scores(candidate_pred_emb)
        candidate_pred_scores = candidate_pred_scores + torch.log(masks.to(torch.float).unsqueeze(2))
        candidate_pred_scores = candidate_pred_scores.squeeze(2)
        if self.use_gold_predicates is True:
            # Use gold predicate positions directly instead of pruning.
            predicates = gold_predicates[0]
            num_preds = gold_predicates[1]
            pred_scores = torch.zeros_like(predicates)
            top_pred_indices = predicates
        else:
            predicates, _, pred_scores, num_preds, top_pred_indices = self.get_batch_topk(
                candidate_pred_ids, candidate_pred_ids, candidate_pred_scores, self.config.predicate_ratio,
                sent_lengths, max_sent_length,
                sort_spans=False, enforce_non_crossing=False)
        """Get top arg embeddings"""
        arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices)  # [num_sentences, max_num_args]
        arg_emb = candidate_span_emb.index_select(0, arg_span_indices.view(-1)).view(
            arg_span_indices.size()[0], arg_span_indices.size()[1], -1
        )  # [num_sentences, max_num_args, emb]
        """Get top predicate embeddings"""
        pred_emb = self.batch_index_select(candidate_pred_emb,
                                           top_pred_indices)  # [num_sentences, max_num_preds, emb]
        """Get the srl scores according to the arg emb and pre emb."""
        srl_scores = self.get_srl_scores(arg_emb, pred_emb, arg_scores, pred_scores, self.label_space_size, self.config,
                                         self.dropout)  # [num_sentences, max_num_args, max_num_preds, num_labels]
        if gold_arg_labels is not None:
            """Get the answers according to the labels"""
            srl_labels = self.get_srl_labels(arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts,
                                             gold_arg_ends, gold_arg_labels, max_sent_length)
            """Compute the srl loss"""
            srl_loss, srl_mask = self.get_srl_softmax_loss(srl_scores, srl_labels, num_args, num_preds)
            predict_dict.update({
                'srl_mask': srl_mask,
                'loss': srl_loss
            })
        else:
            predict_dict['srl_mask'] = self.get_srl_loss_mask(srl_scores, num_args, num_preds)
        # NOTE(review): argmax over dim 1 reduces the *argument* axis, giving
        # shape [num_sentences, num_preds, num_labels]; the trailing comment's
        # claimed shape would require dim=-1 (argmax over labels). Confirm
        # against consumers of "arg_labels" before changing.
        predict_dict.update({
            "candidate_arg_scores": candidate_arg_scores,
            "candidate_pred_scores": candidate_pred_scores,
            "predicates": predicates,
            "arg_starts": arg_starts,
            "arg_ends": arg_ends,
            "arg_scores": arg_scores,
            "pred_scores": pred_scores,
            "num_args": num_args,
            "num_preds": num_preds,
            "arg_labels": torch.max(srl_scores, 1)[1],  # [num_sentences, num_args, num_preds]
            "srl_scores": srl_scores,
        })
        return predict_dict
class SpanRankingSRLModel(nn.Module):
    """End-to-end span-ranking SRL model: embed -> contextualize -> decode.

    Wraps an embedding module and a context encoder (both injected) around
    a SpanRankingSRLDecoder; the decoder produces span/predicate
    candidates, pairwise label scores, and (during training) the loss.
    """
    def __init__(self, config, embed: torch.nn.Module, context_layer: torch.nn.Module, label_space_size):
        super(SpanRankingSRLModel, self).__init__()
        self.config = config
        self.dropout = float(config.dropout)
        self.lexical_dropout = float(self.config.lexical_dropout)
        self.label_space_size = label_space_size
        # Initialize layers and parameters
        self.word_embedding_dim = embed.get_output_dim()  # get the embedding dim
        self.embed = embed
        # Initialize context layer
        self.context_layer = context_layer
        context_layer_output_dim = context_layer.get_output_dim()
        self.decoder = SpanRankingSRLDecoder(context_layer_output_dim, label_space_size, config)
    def forward(self,
                batch: Dict[str, torch.Tensor]
                ):
        """Run the full pipeline on a batch dict; returns the decoder's prediction dict."""
        gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = \
            self.unpack(batch, training=self.training)
        context_embeddings = self.embed(batch)
        # Word-level (lexical) dropout before contextualization.
        context_embeddings = F.dropout(context_embeddings, self.lexical_dropout, self.training)
        contextualized_embeddings = self.context_layer(context_embeddings, masks)
        return self.decoder.decode(contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends,
                                   gold_arg_labels, gold_predicates)
    @staticmethod
    def unpack(batch, mask=None, training=False):
        """Pull the standard field tensors out of a batch dict.

        Missing fields come back as None (e.g. no gold labels at predict
        time). ``mask`` is derived from 'token_length' when not supplied.
        """
        keys = 'token_length', 'predicate_offset', 'argument_begin_offset', 'argument_end_offset', 'srl_label_id'
        sent_lengths, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels = [batch.get(k, None) for k in
                                                                                         keys]
        if mask is None:
            mask = util.lengths_to_mask(sent_lengths)
        # elif not training:
        #     sent_lengths = mask.sum(dim=1)
        return gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, mask, sent_lengths
| 55.231537 | 123 | 0.668317 | from typing import Dict
from alnlp.modules.feedforward import FeedForward
from alnlp.modules.time_distributed import TimeDistributed
from .highway_variational_lstm import *
import torch
from alnlp.modules import util
from ...parsers.biaffine.biaffine import Biaffine
def initializer_1d(input_tensor, initializer):
    """Apply a 2-D weight ``initializer`` (e.g. xavier) to a 1-D tensor.

    The tensor is temporarily viewed as a single-column matrix so that
    fan-in/fan-out based initializers can operate on it, then flattened back.
    """
    assert len(input_tensor.size()) == 1
    as_column = input_tensor.view(-1, 1)
    return initializer(as_column).view(-1)
class SpanRankingSRLDecoder(nn.Module):
    """Span-ranking decoder for semantic role labeling.

    Enumerates all spans up to ``config.max_arg_width`` as argument candidates
    and all tokens as predicate candidates, scores each with small FFNNs,
    keeps the top-scoring fraction of each, then scores every kept
    (argument, predicate) pair over the label space (optionally adding a
    biaffine term).  When gold labels are given, also computes a softmax loss.
    """

    def __init__(self, context_layer_output_dim, label_space_size, config) -> None:
        super().__init__()
        self.config = config
        self.label_space_size = label_space_size
        self.dropout = float(config.dropout)
        self.use_gold_predicates = config.use_gold_predicates
        # Embedding of span width (1..max_arg_width), part of the span representation.
        self.span_width_embedding = nn.Embedding(self.config.max_arg_width, self.config.span_width_feature_size)
        # Span emb = [start emb; end emb; mean-of-tokens emb] + width feature.
        self.span_emb_size = 3 * context_layer_output_dim + self.config.span_width_feature_size
        # Unary FFNN scorer for argument candidates.
        self.arg_unary_score_layers = nn.ModuleList([nn.Linear(self.span_emb_size, self.config.ffnn_size) if i == 0
                                                     else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
                                                     in range(self.config.ffnn_depth)])
        self.arg_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        self.arg_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
        # Unary FFNN scorer for predicate candidates (token-level).
        self.pred_unary_score_layers = nn.ModuleList(
            [nn.Linear(context_layer_output_dim, self.config.ffnn_size) if i == 0
             else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
             in range(self.config.ffnn_depth)])
        self.pred_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        self.pred_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
        # Pairwise (argument span, predicate token) scorer over label_space_size - 1
        # real labels; label 0 (null) gets a fixed zero score (see get_srl_scores).
        self.srl_unary_score_input_size = self.span_emb_size + context_layer_output_dim
        self.srl_unary_score_layers = nn.ModuleList([nn.Linear(self.srl_unary_score_input_size, self.config.ffnn_size)
                                                     if i == 0 else nn.Linear(self.config.ffnn_size,
                                                                              self.config.ffnn_size)
                                                     for i in range(self.config.ffnn_depth)])
        self.srl_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
        self.srl_unary_score_projection = nn.Linear(self.config.ffnn_size, self.label_space_size - 1)
        if config.use_biaffine:
            # Optional biaffine attention term added on top of the FFNN pair scores.
            self.predicate_scale = TimeDistributed(FeedForward(context_layer_output_dim, 1, self.span_emb_size, 'ReLU'))
            self.biaffine = Biaffine(self.span_emb_size, self.label_space_size - 1)
        self.loss_reduction = config.loss_reduction
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize all scorer weights and biases (biases via initializer_1d)."""
        init.xavier_uniform_(self.span_width_embedding.weight)
        for layer in self.arg_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.arg_unary_score_projection.weight)
        initializer_1d(self.arg_unary_score_projection.bias, init.xavier_uniform_)
        for layer in self.pred_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.pred_unary_score_projection.weight)
        initializer_1d(self.pred_unary_score_projection.bias, init.xavier_uniform_)
        for layer in self.srl_unary_score_layers:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
        init.xavier_uniform_(self.srl_unary_score_projection.weight)
        initializer_1d(self.srl_unary_score_projection.bias, init.xavier_uniform_)
        return None

    def forward(self, hidden_states, batch, mask=None):
        """Unpack the batch (via SpanRankingSRLModel.unpack) and run decode."""
        gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = SpanRankingSRLModel.unpack(
            batch, mask=mask, training=self.training)
        return self.decode(hidden_states, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
                           gold_predicates)

    @staticmethod
    def get_candidate_spans(sent_lengths: torch.Tensor, max_sent_length, max_arg_width):
        """Enumerate every (start, end) span of width 1..max_arg_width per sentence.

        Returns flattened start/end index tensors of shape
        [num_sentences, max_sent_length * max_arg_width] and a boolean mask of
        the spans that fit inside the actual sentence length; indices of
        invalid spans are zeroed.
        """
        num_sentences = len(sent_lengths)
        device = sent_lengths.device
        candidate_starts = torch.arange(0, max_sent_length, device=device).expand(num_sentences, max_arg_width, -1)
        candidate_width = torch.arange(0, max_arg_width, device=device).view(1, -1, 1)
        candidate_ends = candidate_starts + candidate_width
        candidate_starts = candidate_starts.contiguous().view(num_sentences, max_sent_length * max_arg_width)
        candidate_ends = candidate_ends.contiguous().view(num_sentences, max_sent_length * max_arg_width)
        actual_sent_lengths = sent_lengths.view(-1, 1).expand(-1, max_sent_length * max_arg_width)
        candidate_mask = candidate_ends < actual_sent_lengths
        # Zero out indices of spans that run past the sentence end.
        candidate_starts = candidate_starts * candidate_mask
        candidate_ends = candidate_ends * candidate_mask
        return candidate_starts, candidate_ends, candidate_mask

    @staticmethod
    def exclusive_cumsum(input: torch.Tensor, exclusive=True):
        """Exclusive cumulative sum, returned as a column vector.

        Only ``exclusive=True`` is supported (asserted); the else-branch is dead code.
        Used to turn per-sentence lengths into per-sentence word offsets.
        """
        assert exclusive is True
        if exclusive is True:
            exclusive_sent_lengths = input.new_zeros(1, dtype=torch.long)
            result = torch.cumsum(torch.cat([exclusive_sent_lengths, input], 0)[:-1], 0).view(-1, 1)
        else:
            result = torch.cumsum(input, 0).view(-1, 1)
        return result

    def flatten_emb(self, emb):
        """Collapse [num_sentences, max_len, emb] to [num_sentences * max_len, emb]."""
        num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
        assert len(emb.size()) == 3
        flatted_emb = emb.contiguous().view(num_sentences * max_sentence_length, -1)
        return flatted_emb

    def flatten_emb_in_sentence(self, emb, batch_sentences_mask):
        """Flatten and keep only the positions selected by the boolean sentence mask."""
        num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
        flatted_emb = self.flatten_emb(emb)
        return flatted_emb[batch_sentences_mask.reshape(num_sentences * max_sentence_length)]

    def get_span_emb(self, flatted_context_emb, flatted_candidate_starts, flatted_candidate_ends,
                     config, dropout=0.0):
        """Build span representations: [start emb; end emb; width emb; mean token emb].

        Returns (span_emb, None, span_text_emb, span_indices, span_indices_mask);
        the second slot is a placeholder for head-attention scores.
        """
        batch_word_num = flatted_context_emb.size()[0]
        span_start_emb = flatted_context_emb[flatted_candidate_starts]
        span_end_emb = flatted_context_emb[flatted_candidate_ends]
        span_emb_feature_list = [span_start_emb, span_end_emb]
        span_width = 1 + flatted_candidate_ends - flatted_candidate_starts
        max_arg_width = config.max_arg_width
        span_width_index = span_width - 1
        span_width_emb = self.span_width_embedding(span_width_index)
        span_width_emb = F.dropout(span_width_emb, dropout, self.training)
        span_emb_feature_list.append(span_width_emb)
        cpu_flatted_candidte_starts = flatted_candidate_starts
        # All token indices inside each span, clamped so padding reads a valid row.
        span_indices = torch.arange(0, max_arg_width, device=flatted_context_emb.device).view(1, -1) + \
                       cpu_flatted_candidte_starts.view(-1, 1)
        span_indices = torch.clamp(span_indices, max=batch_word_num - 1)
        num_spans, spans_width = span_indices.size()[0], span_indices.size()[1]
        flatted_span_indices = span_indices.view(-1)
        flatted_span_indices = flatted_span_indices
        span_text_emb = flatted_context_emb.index_select(0, flatted_span_indices).view(num_spans, spans_width, -1)
        span_indices_mask = util.lengths_to_mask(span_width, max_len=max_arg_width)
        # NOTE(review): `_text_emb` (the masked token embeddings) is computed but
        # never used — the mean below averages the UNMASKED span_text_emb, so
        # positions beyond the span width leak into span_head_emb. Confirm intent.
        _text_emb = span_text_emb * span_indices_mask.unsqueeze(2).expand(-1, -1, span_text_emb.size()[-1])
        span_head_emb = torch.mean(span_text_emb, 1)
        span_emb_feature_list.append(span_head_emb)
        span_emb = torch.cat(span_emb_feature_list, 1)
        return span_emb, None, span_text_emb, span_indices, span_indices_mask

    def get_arg_unary_scores(self, span_emb):
        """Score each span embedding as an argument candidate (FFNN -> scalar)."""
        input = span_emb
        for i, ffnn in enumerate(self.arg_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.arg_dropout_layers[i].forward(input)
        output = self.arg_unary_score_projection.forward(input)
        return output

    def get_pred_unary_scores(self, span_emb):
        """Score each token embedding as a predicate candidate (FFNN -> scalar)."""
        input = span_emb
        for i, ffnn in enumerate(self.pred_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.pred_dropout_layers[i].forward(input)
        output = self.pred_unary_score_projection.forward(input)
        return output

    def extract_spans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,
                      sort_spans, enforce_non_crossing):
        """Pick each sentence's top-k candidate indices, padded to a common width.

        Shorter rows are padded by repeating their last selected index.
        NOTE(review): sort_spans / enforce_non_crossing are accepted but ignored here.
        """
        max_num_output_spans = int(torch.max(topk))
        indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)]
        output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1])
                                      for item in indices]
        output_span_indices_tensor = torch.stack(output_span_indices_tensor)
        return output_span_indices_tensor

    def batch_index_select(self, emb, indices):
        """Gather rows of a [batch, len, emb] tensor per-sentence by index."""
        num_sentences = emb.size()[0]
        max_sent_length = emb.size()[1]
        flatten_emb = self.flatten_emb(emb)
        offset = (torch.arange(0, num_sentences, device=emb.device) * max_sent_length).unsqueeze(1)
        return torch.index_select(flatten_emb, 0, (indices + offset).view(-1)) \
            .view(indices.size()[0], indices.size()[1], -1)

    def get_batch_topk(self, candidate_starts: torch.Tensor, candidate_ends, candidate_scores, topk_ratio, text_len,
                       max_sentence_length, sort_spans=False, enforce_non_crossing=True):
        """Keep the top ``ceil-ish`` fraction of candidates per sentence.

        k = max(1, floor(sentence_length * topk_ratio)).  Returns the selected
        starts, ends, scores, per-sentence k, and the selected indices.
        """
        num_sentences = candidate_starts.size()[0]
        max_sentence_length = candidate_starts.size()[1]
        topk = torch.floor(text_len.to(torch.float) * topk_ratio).to(torch.long)
        topk = torch.max(topk, torch.ones(num_sentences, device=candidate_starts.device, dtype=torch.long))
        predicted_indices = self.extract_spans(candidate_scores, candidate_starts, candidate_ends, topk,
                                               max_sentence_length, sort_spans, enforce_non_crossing)
        predicted_starts = torch.gather(candidate_starts, 1, predicted_indices)
        predicted_ends = torch.gather(candidate_ends, 1, predicted_indices)
        predicted_scores = torch.gather(candidate_scores, 1, predicted_indices)
        return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices

    def get_dense_span_labels(self, span_starts, span_ends, span_labels, max_sentence_length,
                              span_parents=None):
        """Scatter sparse gold spans into a dense label tensor.

        Shape is [num_sentences, max_len, max_len] (or 4-D with a predicate axis
        when ``span_parents`` is given).  NOTE(review): mutates ``span_starts``
        in place — padding entries (label 0) get their start shifted by one so
        they do not collide with a real span at (0, 0); confirm callers expect this.
        """
        num_sentences = span_starts.size()[0]
        max_spans_num = span_starts.size()[1]
        span_starts[(span_labels == 0) & (span_starts < max_sentence_length - 1)] += 1
        sentence_indices = torch.arange(0, num_sentences, device=span_starts.device).unsqueeze(1).expand(-1,
                                                                                                         max_spans_num)
        sparse_indices = torch.cat([sentence_indices.unsqueeze(2), span_starts.unsqueeze(2), span_ends.unsqueeze(2)],
                                   dim=2)
        if span_parents is not None:
            sparse_indices = torch.cat([sparse_indices, span_parents.unsqueeze(2)], 2)
        rank = 3 if span_parents is None else 4
        dense_labels = torch.sparse.LongTensor(sparse_indices.view(num_sentences * max_spans_num, rank).t(),
                                               span_labels.view(-1),
                                               torch.Size([num_sentences] + [max_sentence_length] * (rank - 1))) \
            .to_dense()
        return dense_labels

    @staticmethod
    def gather_4d(params, indices):
        """Advanced-index a 4-D tensor with four aligned index tensors."""
        assert len(params.size()) == 4 and len(indices) == 4
        indices_a, indices_b, indices_c, indices_d = indices
        result = params[indices_a, indices_b, indices_c, indices_d]
        return result

    def get_srl_labels(self,
                       arg_starts,
                       arg_ends,
                       predicates,
                       gold_predicates,
                       gold_arg_starts,
                       gold_arg_ends,
                       gold_arg_labels,
                       max_sentence_length
                       ):
        """Look up the gold label for every predicted (argument, predicate) pair.

        Builds a dense [sent, start, end, predicate] gold-label tensor and
        gathers it at the predicted span/predicate coordinates.
        """
        num_sentences = arg_starts.size()[0]
        max_arg_num = arg_starts.size()[1]
        max_pred_num = predicates.size()[1]
        sentence_indices_2d = torch.arange(0, num_sentences, device=arg_starts.device).unsqueeze(1).unsqueeze(2).expand(
            -1, max_arg_num, max_pred_num)
        expanded_arg_starts = arg_starts.unsqueeze(2).expand(-1, -1, max_pred_num)
        expanded_arg_ends = arg_ends.unsqueeze(2).expand(-1, -1, max_pred_num)
        expanded_predicates = predicates.unsqueeze(1).expand(-1, max_arg_num, -1)
        dense_srl_labels = self.get_dense_span_labels(gold_arg_starts,
                                                      gold_arg_ends,
                                                      gold_arg_labels,
                                                      max_sentence_length, span_parents=gold_predicates)
        srl_labels = self.gather_4d(dense_srl_labels,
                                    [sentence_indices_2d, expanded_arg_starts, expanded_arg_ends, expanded_predicates])
        return srl_labels

    def get_srl_unary_scores(self, span_emb):
        """Score concatenated (argument, predicate) pair embeddings over the real labels."""
        input = span_emb
        for i, ffnn in enumerate(self.srl_unary_score_layers):
            input = F.relu(ffnn.forward(input))
            input = self.srl_dropout_layers[i].forward(input)
        output = self.srl_unary_score_projection.forward(input)
        return output

    def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout):
        """Full pair scores: FFNN pair score (+ optional biaffine) + unary scores.

        A zero "dummy" column is prepended for the null label, so the result
        is [num_sentences, num_args, num_preds, num_labels].
        """
        num_sentences = arg_emb.size()[0]
        num_args = arg_emb.size()[1]
        num_preds = pred_emb.size()[1]
        # Broadcast every argument against every predicate.
        unsqueezed_arg_emb = arg_emb.unsqueeze(2)
        unsqueezed_pred_emb = pred_emb.unsqueeze(1)
        expanded_arg_emb = unsqueezed_arg_emb.expand(-1, -1, num_preds, -1)
        expanded_pred_emb = unsqueezed_pred_emb.expand(-1, num_args, -1, -1)
        pair_emb_list = [expanded_arg_emb, expanded_pred_emb]
        pair_emb = torch.cat(pair_emb_list, 3)
        pair_emb_size = pair_emb.size()[3]
        flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size)
        flat_srl_scores = self.get_srl_unary_scores(flat_pair_emb)
        srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, -1)
        if self.config.use_biaffine:
            srl_scores += self.biaffine(arg_emb, self.predicate_scale(pred_emb)).permute([0, 2, 3, 1])
        unsqueezed_arg_scores, unsqueezed_pred_scores = \
            arg_scores.unsqueeze(2).unsqueeze(3), pred_scores.unsqueeze(1).unsqueeze(3)
        srl_scores = srl_scores + unsqueezed_arg_scores + unsqueezed_pred_scores
        dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1], device=arg_emb.device)
        srl_scores = torch.cat([dummy_scores, srl_scores], 3)
        return srl_scores

    def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):
        """Cross-entropy over valid (arg, pred) cells; returns (loss, validity mask)."""
        srl_loss_mask = self.get_srl_loss_mask(srl_scores, num_predicted_args, num_predicted_preds)
        loss = torch.nn.functional.cross_entropy(srl_scores[srl_loss_mask], srl_labels[srl_loss_mask],
                                                 reduction=self.loss_reduction)
        return loss, srl_loss_mask

    def get_srl_loss_mask(self, srl_scores, num_predicted_args, num_predicted_preds):
        """Boolean mask of (arg, pred) cells where both indices are within the per-sentence counts."""
        max_num_arg = srl_scores.size()[1]
        max_num_pred = srl_scores.size()[2]
        args_mask = util.lengths_to_mask(num_predicted_args, max_num_arg)
        pred_mask = util.lengths_to_mask(num_predicted_preds, max_num_pred)
        srl_loss_mask = args_mask.unsqueeze(2) & pred_mask.unsqueeze(1)
        return srl_loss_mask

    def decode(self, contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
               gold_predicates):
        """End-to-end decoding: enumerate -> score -> prune -> pair-score (-> loss).

        Returns a dict with candidate/selection tensors, pair scores, the
        argmax labels, and 'loss' when gold labels were supplied.
        """
        num_sentences, max_sent_length = masks.size()
        device = sent_lengths.device
        # candidate_starts [num_sentences, max_sent_length * max_arg_width]
        candidate_starts, candidate_ends, candidate_mask = self.get_candidate_spans(
            sent_lengths, max_sent_length, self.config.max_arg_width)
        flatted_candidate_mask = candidate_mask.view(-1)
        batch_word_offset = self.exclusive_cumsum(sent_lengths)  # get the word offset in a batch
        # Translate per-sentence positions into batch-global word indices and
        # keep only the valid (in-sentence) candidates.
        flatted_candidate_starts = candidate_starts + batch_word_offset
        flatted_candidate_starts = flatted_candidate_starts.view(-1)[flatted_candidate_mask].to(torch.long)
        flatted_candidate_ends = candidate_ends + batch_word_offset
        flatted_candidate_ends = flatted_candidate_ends.view(-1)[flatted_candidate_mask].to(torch.long)
        # Flatten the encoder output, dropping padding positions.
        flatted_context_output = self.flatten_emb_in_sentence(contextualized_embeddings, masks)
        candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.get_span_emb(
            flatted_context_output, flatted_candidate_starts, flatted_candidate_ends,
            self.config, dropout=self.dropout)
        candidate_span_number = candidate_span_emb.size()[0]
        max_candidate_spans_num_per_sentence = candidate_mask.size()[1]
        # Dense map from (sentence, candidate slot) -> row in candidate_span_emb.
        sparse_indices = candidate_mask.nonzero(as_tuple=False)
        sparse_values = torch.arange(0, candidate_span_number, device=device)
        candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values,
                                                      torch.Size([num_sentences,
                                                                  max_candidate_spans_num_per_sentence])).to_dense()
        # log(0) = -inf for invalid candidates, so they never survive top-k.
        spans_log_mask = torch.log(candidate_mask.to(torch.float))
        predict_dict = {"candidate_starts": candidate_starts, "candidate_ends": candidate_ends,
                        'candidate_arg_mask': candidate_mask, "head_scores": head_scores}
        # Score and prune argument candidates.
        flatted_candidate_arg_scores = self.get_arg_unary_scores(candidate_span_emb)
        candidate_arg_scores = flatted_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \
            .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])
        candidate_arg_scores = candidate_arg_scores + spans_log_mask
        arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = \
            self.get_batch_topk(candidate_starts, candidate_ends, candidate_arg_scores,
                                self.config.argument_ratio, sent_lengths, max_sent_length,
                                sort_spans=False, enforce_non_crossing=False)
        # Score and prune predicate candidates (every token is a candidate).
        candidate_pred_ids = torch.arange(0, max_sent_length, device=device).unsqueeze(0).expand(num_sentences, -1)
        candidate_pred_emb = contextualized_embeddings
        candidate_pred_scores = self.get_pred_unary_scores(candidate_pred_emb)
        candidate_pred_scores = candidate_pred_scores + torch.log(masks.to(torch.float).unsqueeze(2))
        candidate_pred_scores = candidate_pred_scores.squeeze(2)
        if self.use_gold_predicates is True:
            # Bypass predicate pruning and use the annotated predicates directly.
            predicates = gold_predicates[0]
            num_preds = gold_predicates[1]
            pred_scores = torch.zeros_like(predicates)
            top_pred_indices = predicates
        else:
            predicates, _, pred_scores, num_preds, top_pred_indices = self.get_batch_topk(
                candidate_pred_ids, candidate_pred_ids, candidate_pred_scores, self.config.predicate_ratio,
                sent_lengths, max_sent_length,
                sort_spans=False, enforce_non_crossing=False)
        # Gather embeddings of the surviving arguments and predicates.
        arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices)  # [num_sentences, max_num_args]
        arg_emb = candidate_span_emb.index_select(0, arg_span_indices.view(-1)).view(
            arg_span_indices.size()[0], arg_span_indices.size()[1], -1
        )  # [num_sentences, max_num_args, emb]
        pred_emb = self.batch_index_select(candidate_pred_emb,
                                           top_pred_indices)  # [num_sentences, max_num_preds, emb]
        srl_scores = self.get_srl_scores(arg_emb, pred_emb, arg_scores, pred_scores, self.label_space_size, self.config,
                                         self.dropout)  # [num_sentences, max_num_args, max_num_preds, num_labels]
        if gold_arg_labels is not None:
            # Training path: align gold labels with the predictions and compute the loss.
            srl_labels = self.get_srl_labels(arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts,
                                             gold_arg_ends, gold_arg_labels, max_sent_length)
            srl_loss, srl_mask = self.get_srl_softmax_loss(srl_scores, srl_labels, num_args, num_preds)
            predict_dict.update({
                'srl_mask': srl_mask,
                'loss': srl_loss
            })
        else:
            predict_dict['srl_mask'] = self.get_srl_loss_mask(srl_scores, num_args, num_preds)
        # NOTE(review): argmax below is taken over dim 1 (arguments), while the
        # trailing shape comment suggests [num_sentences, num_args, num_preds],
        # which would require argmax over the label dim (3). Confirm which is intended.
        predict_dict.update({
            "candidate_arg_scores": candidate_arg_scores,
            "candidate_pred_scores": candidate_pred_scores,
            "predicates": predicates,
            "arg_starts": arg_starts,
            "arg_ends": arg_ends,
            "arg_scores": arg_scores,
            "pred_scores": pred_scores,
            "num_args": num_args,
            "num_preds": num_preds,
            "arg_labels": torch.max(srl_scores, 1)[1],  # [num_sentences, num_args, num_preds]
            "srl_scores": srl_scores,
        })
        return predict_dict
class SpanRankingSRLModel(nn.Module):
    """Span-ranking semantic role labeling model.

    Pipeline: token embedding -> lexical dropout -> contextual encoder ->
    ``SpanRankingSRLDecoder``, which scores candidate argument spans and
    predicates and produces the SRL predictions (plus the loss whenever gold
    annotations are present in the batch).
    """

    def __init__(self, config, embed: torch.nn.Module, context_layer: torch.nn.Module, label_space_size):
        """Assemble the model from an embedding layer, an encoder and the label count."""
        super(SpanRankingSRLModel, self).__init__()
        self.config = config
        self.dropout = float(config.dropout)
        self.lexical_dropout = float(self.config.lexical_dropout)
        self.label_space_size = label_space_size
        # Embedding layer and its output width.
        self.word_embedding_dim = embed.get_output_dim()
        self.embed = embed
        # Contextual encoder whose output width sizes the decoder.
        self.context_layer = context_layer
        context_layer_output_dim = context_layer.get_output_dim()
        self.decoder = SpanRankingSRLDecoder(context_layer_output_dim, label_space_size, config)

    def forward(self,
                batch: Dict[str, torch.Tensor]
                ):
        """Embed, encode and decode one batch; returns the decoder's output dict."""
        arg_ends, arg_labels, arg_starts, predicates, masks, lengths = self.unpack(batch, training=self.training)
        # Word-level (lexical) dropout is applied before the encoder.
        embedded = F.dropout(self.embed(batch), self.lexical_dropout, self.training)
        encoded = self.context_layer(embedded, masks)
        return self.decoder.decode(encoded, lengths, masks, arg_starts, arg_ends, arg_labels, predicates)

    @staticmethod
    def unpack(batch, mask=None, training=False):
        """Pull lengths and (possibly absent) gold annotations out of *batch*.

        Missing keys come back as ``None``; the mask is derived from the
        sentence lengths unless the caller supplies one.
        """
        keys = ('token_length', 'predicate_offset', 'argument_begin_offset',
                'argument_end_offset', 'srl_label_id')
        sent_lengths, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels = \
            [batch.get(k) for k in keys]
        if mask is None:
            mask = util.lengths_to_mask(sent_lengths)
        return gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, mask, sent_lengths
| true | true |
f73327804f950cdb77bf55735456e5bd94c31f50 | 3,701 | py | Python | app/api/copytter/migrations/0002_entry_follow_media_profile.py | T-8723/copytter | bf27545a010d5fd1e17a38b10adddd22858cbcea | [
"MIT"
] | null | null | null | app/api/copytter/migrations/0002_entry_follow_media_profile.py | T-8723/copytter | bf27545a010d5fd1e17a38b10adddd22858cbcea | [
"MIT"
] | null | null | null | app/api/copytter/migrations/0002_entry_follow_media_profile.py | T-8723/copytter | bf27545a010d5fd1e17a38b10adddd22858cbcea | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-25 03:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('copytter', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.CharField(blank=True, max_length=20)),
('birth_date', models.DateField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=30)),
('age', models.IntegerField(blank=True, default=0)),
('icon_pass', models.ImageField(blank=True, upload_to='images/')),
('profile_message', models.TextField(blank=True, max_length=300)),
('status', models.CharField(choices=[('machine', 'BOT'), ('public', '一般'), ('block', '凍結'), ('close', '非公開'), ('official', '公式')], default='publish', max_length=10)),
('profile_user_id', models.CharField(default='riqURB89q4', max_length=32, unique=True)),
('sensitive_entry', models.BooleanField(default=False)),
('follow_count', models.IntegerField(default=0)),
('follower_count', models.IntegerField(default=0)),
('profile_first_registed', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Media',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('media_type', models.CharField(max_length=8)),
('media_url', models.URLField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Follow',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('follow_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follow_user', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField(max_length=300)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', '下書き'), ('public', '公開中'), ('close', '非公開'), ('machine', 'BOT')], default='draft', max_length=8)),
('relation_id', models.CharField(max_length=8)),
('relation_cont', models.IntegerField(default=0)),
('like_count', models.IntegerField(default=0)),
('media_close', models.BooleanField(default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 52.871429 | 182 | 0.598757 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial app migration creating the Profile, Media, Follow and Entry tables."""

    initial = True

    dependencies = [
        # Supports a swapped-in custom user model via AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('copytter', '0001_initial'),
    ]

    operations = [
        # One-to-one user profile with public id, counters and visibility status.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gender', models.CharField(blank=True, max_length=20)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('location', models.CharField(blank=True, max_length=30)),
                ('age', models.IntegerField(blank=True, default=0)),
                ('icon_pass', models.ImageField(blank=True, upload_to='images/')),
                ('profile_message', models.TextField(blank=True, max_length=300)),
                # NOTE(review): default 'publish' is not among the choice keys
                # ('machine'/'public'/'block'/'close'/'official') — likely meant 'public';
                # confirm against the model definition.
                ('status', models.CharField(choices=[('machine', 'BOT'), ('public', '一般'), ('block', '凍結'), ('close', '非公開'), ('official', '公式')], default='publish', max_length=10)),
                # NOTE(review): constant default with unique=True will collide if two
                # rows ever rely on it — presumably the app supplies generated ids; verify.
                ('profile_user_id', models.CharField(default='riqURB89q4', max_length=32, unique=True)),
                ('sensitive_entry', models.BooleanField(default=False)),
                ('follow_count', models.IntegerField(default=0)),
                ('follower_count', models.IntegerField(default=0)),
                ('profile_first_registed', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Media attachments uploaded by a user.
        migrations.CreateModel(
            name='Media',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('media_type', models.CharField(max_length=8)),
                ('media_url', models.URLField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Directed follow edge: `user` follows `follow_user`.
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('follow_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follow_user', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A posted entry with status, counters and threading fields.
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', '下書き'), ('public', '公開中'), ('close', '非公開'), ('machine', 'BOT')], default='draft', max_length=8)),
                ('relation_id', models.CharField(max_length=8)),
                ('relation_cont', models.IntegerField(default=0)),
                ('like_count', models.IntegerField(default=0)),
                ('media_close', models.BooleanField(default=False)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f73327d5e75e9c13d3934310bd44d2656ccf8cef | 332,816 | py | Python | kubernetes_asyncio/client/api/rbac_authorization_v1alpha1_api.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 196 | 2018-05-23T16:55:41.000Z | 2022-03-31T10:09:40.000Z | kubernetes_asyncio/client/api/rbac_authorization_v1alpha1_api.py | tomplus/kubernetes_asyncio | e8c8686ec11be3a5295ae9d5d8728299492a61f8 | [
"Apache-2.0"
] | 164 | 2018-05-20T20:39:03.000Z | 2022-03-29T22:57:04.000Z | kubernetes_asyncio/client/api/rbac_authorization_v1alpha1_api.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 41 | 2018-06-08T00:39:53.000Z | 2022-01-12T18:19:06.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.18.20
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class RbacAuthorizationV1alpha1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_cluster_role(self, body, **kwargs): # noqa: E501
"""create_cluster_role # noqa: E501
create a ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_role(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1ClusterRole body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRole
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_cluster_role_with_http_info(body, **kwargs) # noqa: E501
def create_cluster_role_with_http_info(self, body, **kwargs): # noqa: E501
"""create_cluster_role # noqa: E501
create a ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_role_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1ClusterRole body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRole, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRole', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_cluster_role_binding(self, body, **kwargs): # noqa: E501
"""create_cluster_role_binding # noqa: E501
create a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_role_binding(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1ClusterRoleBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_cluster_role_binding_with_http_info(body, **kwargs) # noqa: E501
def create_cluster_role_binding_with_http_info(self, body, **kwargs): # noqa: E501
"""create_cluster_role_binding # noqa: E501
create a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_role_binding_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1alpha1ClusterRoleBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRoleBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_role(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_role # noqa: E501
create a Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_role(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1Role body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1Role
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_role_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_role_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_role # noqa: E501
create a Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_role_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1Role body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1Role, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1Role', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_role_binding(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_role_binding # noqa: E501
create a RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_role_binding(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1RoleBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1RoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_role_binding_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_role_binding_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_role_binding # noqa: E501
create a RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_role_binding_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1RoleBinding body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_cluster_role(self, name, **kwargs): # noqa: E501
"""delete_cluster_role # noqa: E501
delete a ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_cluster_role_with_http_info(name, **kwargs) # noqa: E501
def delete_cluster_role_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_cluster_role # noqa: E501
delete a ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_cluster_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_cluster_role_binding(self, name, **kwargs): # noqa: E501
"""delete_cluster_role_binding # noqa: E501
delete a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
def delete_cluster_role_binding_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_cluster_role_binding # noqa: E501
delete a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role_binding_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_cluster_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_cluster_role(self, **kwargs): # noqa: E501
"""delete_collection_cluster_role # noqa: E501
delete collection of ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_cluster_role_with_http_info(**kwargs) # noqa: E501
def delete_collection_cluster_role_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_cluster_role # noqa: E501
delete collection of ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_cluster_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_cluster_role_binding(self, **kwargs): # noqa: E501
"""delete_collection_cluster_role_binding # noqa: E501
delete collection of ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_binding(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_cluster_role_binding_with_http_info(**kwargs) # noqa: E501
def delete_collection_cluster_role_binding_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_cluster_role_binding  # noqa: E501

    Delete a collection of ClusterRoleBinding objects, optionally filtered
    by the standard Kubernetes list/delete options.  The call is
    synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the decoded response.

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', then the output is pretty printed
    :param str _continue: continue token from a previous chunked list call
    :param str dry_run: when present, modifications are not persisted
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the objects are deleted
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; orphan the dependent objects
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: minimum resource version of the result
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) pair of timeouts
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
             the request thread if called asynchronously
    """
    # (wire name, python kwarg name) pairs, in the order the generated
    # client emits them on the query string.
    query_param_names = [
        ('pretty', 'pretty'),
        ('continue', '_continue'),
        ('dryRun', 'dry_run'),
        ('fieldSelector', 'field_selector'),
        ('gracePeriodSeconds', 'grace_period_seconds'),
        ('labelSelector', 'label_selector'),
        ('limit', 'limit'),
        ('orphanDependents', 'orphan_dependents'),
        ('propagationPolicy', 'propagation_policy'),
        ('resourceVersion', 'resource_version'),
        ('timeoutSeconds', 'timeout_seconds'),
    ]

    # Reject any keyword argument this endpoint does not understand.
    accepted = {local for _, local in query_param_names}
    accepted.update([
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_cluster_role_binding" % key
            )

    # Only parameters the caller actually supplied go on the wire.
    query_params = [
        (wire, kwargs[local])
        for wire, local in query_param_names
        if kwargs.get(local) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings',
        'DELETE',
        {},  # no path parameters for a cluster-scoped collection
        query_params,
        header_params,
        body=kwargs.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_role(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_role  # noqa: E501

    Delete a collection of Role objects in the given namespace, optionally
    filtered by the standard Kubernetes list/delete options.  Synchronous
    by default; pass ``async_req=True`` to receive the request thread.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', then the output is pretty printed
    :param str _continue: continue token from a previous chunked list call
    :param str dry_run: when present, modifications are not persisted
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the objects are deleted
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; orphan the dependent objects
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: minimum resource version of the result
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) pair of timeouts
    :return: V1Status; the request thread if called asynchronously
    """
    # Delegate to the *_with_http_info variant, asking it to strip the
    # (status, headers) part so only the deserialized body comes back.
    kwargs['_return_http_data_only'] = True
    delegate = self.delete_collection_namespaced_role_with_http_info
    return delegate(namespace, **kwargs)  # noqa: E501
def delete_collection_namespaced_role_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_role  # noqa: E501

    Delete a collection of Role objects in the given namespace, optionally
    filtered by the standard Kubernetes list/delete options.  Synchronous
    by default; pass ``async_req=True`` to receive the request thread.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', then the output is pretty printed
    :param str _continue: continue token from a previous chunked list call
    :param str dry_run: when present, modifications are not persisted
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the objects are deleted
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; orphan the dependent objects
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: minimum resource version of the result
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) pair of timeouts
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
             the request thread if called asynchronously
    """
    # (wire name, python kwarg name) pairs, in the order the generated
    # client emits them on the query string.
    query_param_names = [
        ('pretty', 'pretty'),
        ('continue', '_continue'),
        ('dryRun', 'dry_run'),
        ('fieldSelector', 'field_selector'),
        ('gracePeriodSeconds', 'grace_period_seconds'),
        ('labelSelector', 'label_selector'),
        ('limit', 'limit'),
        ('orphanDependents', 'orphan_dependents'),
        ('propagationPolicy', 'propagation_policy'),
        ('resourceVersion', 'resource_version'),
        ('timeoutSeconds', 'timeout_seconds'),
    ]

    # Reject any keyword argument this endpoint does not understand.
    accepted = {local for _, local in query_param_names}
    accepted.update([
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_role" % key
            )

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and namespace is None:
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role`")  # noqa: E501

    # Only parameters the caller actually supplied go on the wire.
    query_params = [
        (wire, kwargs[local])
        for wire, local in query_param_names
        if kwargs.get(local) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles',
        'DELETE',
        {'namespace': namespace},
        query_params,
        header_params,
        body=kwargs.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_role_binding(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_role_binding  # noqa: E501

    Delete a collection of RoleBinding objects in the given namespace,
    optionally filtered by the standard Kubernetes list/delete options.
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', then the output is pretty printed
    :param str _continue: continue token from a previous chunked list call
    :param str dry_run: when present, modifications are not persisted
    :param str field_selector: restrict the returned objects by their fields
    :param int grace_period_seconds: seconds before the objects are deleted
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; orphan the dependent objects
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: minimum resource version of the result
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) pair of timeouts
    :return: V1Status; the request thread if called asynchronously
    """
    # Delegate to the *_with_http_info variant, asking it to strip the
    # (status, headers) part so only the deserialized body comes back.
    kwargs['_return_http_data_only'] = True
    delegate = self.delete_collection_namespaced_role_binding_with_http_info
    return delegate(namespace, **kwargs)  # noqa: E501
def delete_collection_namespaced_role_binding_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_role_binding # noqa: E501
delete collection of RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_role_binding_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_role(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_role # noqa: E501
delete a Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_role(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_role_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_role_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_role # noqa: E501
delete a Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_role_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_role_binding(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_role_binding # noqa: E501
delete a RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_role_binding(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_role_binding_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_role_binding # noqa: E501
delete a RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_role_binding_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role_binding`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cluster_role(self, **kwargs):  # noqa: E501
    """list_cluster_role  # noqa: E501

    list or watch objects of kind ClusterRole.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: limit on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: when False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: V1alpha1ClusterRoleList
             (the request thread when called asynchronously)
    """
    # Delegate to the *_with_http_info variant, requesting the response
    # data only (no (data, status, headers) tuple).
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.list_cluster_role_with_http_info(**call_kwargs)  # noqa: E501
def list_cluster_role_with_http_info(self, **kwargs):  # noqa: E501
    """list_cluster_role  # noqa: E501

    list or watch objects of kind ClusterRole.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: limit on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: when False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(V1alpha1ClusterRoleList, status_code(int), headers(HTTPHeaderDict))
             (the request thread when called asynchronously)
    :raises ApiTypeError: on any unrecognized keyword argument
    """
    accepted = frozenset([
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_role" % arg
            )

    # Python argument name -> wire-format query key, in the exact order
    # the generated client emits them on the query string.
    query_key_by_param = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire_key, kwargs[py_name])
        for py_name, wire_key in query_key_by_param
        if kwargs.get(py_name) is not None  # unset and explicit-None alike are omitted
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'GET',
        {},  # no path parameters for a cluster-scoped list
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1ClusterRoleList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_cluster_role_binding(self, **kwargs):  # noqa: E501
    """list_cluster_role_binding  # noqa: E501

    list or watch objects of kind ClusterRoleBinding.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: limit on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: when False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: V1alpha1ClusterRoleBindingList
             (the request thread when called asynchronously)
    """
    # Delegate to the *_with_http_info variant, requesting the response
    # data only (no (data, status, headers) tuple).
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.list_cluster_role_binding_with_http_info(**call_kwargs)  # noqa: E501
def list_cluster_role_binding_with_http_info(self, **kwargs):  # noqa: E501
    """list_cluster_role_binding  # noqa: E501

    list or watch objects of kind ClusterRoleBinding.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: limit on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: when False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(V1alpha1ClusterRoleBindingList, status_code(int), headers(HTTPHeaderDict))
             (the request thread when called asynchronously)
    :raises ApiTypeError: on any unrecognized keyword argument
    """
    accepted = frozenset([
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    for arg in kwargs:
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_role_binding" % arg
            )

    # Python argument name -> wire-format query key, in the exact order
    # the generated client emits them on the query string.
    query_key_by_param = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire_key, kwargs[py_name])
        for py_name, wire_key in query_key_by_param
        if kwargs.get(py_name) is not None  # unset and explicit-None alike are omitted
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings', 'GET',
        {},  # no path parameters for a cluster-scoped list
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1ClusterRoleBindingList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_namespaced_role(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_role  # noqa: E501

    list or watch objects of kind Role.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread whose ``get()`` yields the result instead.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list call
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: limit on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: when False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: V1alpha1RoleList
             (the request thread when called asynchronously)
    """
    # Delegate to the *_with_http_info variant, requesting the response
    # data only (no (data, status, headers) tuple).
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.list_namespaced_role_with_http_info(namespace, **call_kwargs)  # noqa: E501
def list_namespaced_role_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_role # noqa: E501
list or watch objects of kind Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_role_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_role_binding(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_role_binding # noqa: E501
list or watch objects of kind RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_role_binding(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1RoleBindingList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_role_binding_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_role_binding # noqa: E501
list or watch objects of kind RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_role_binding_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleBindingList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleBindingList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_role_binding_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_role_binding_for_all_namespaces # noqa: E501
list or watch objects of kind RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1RoleBindingList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_role_binding_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_role_binding_for_all_namespaces # noqa: E501
list or watch objects of kind RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleBindingList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_role_binding_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/rolebindings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleBindingList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_role_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_role_for_all_namespaces # noqa: E501
list or watch objects of kind Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1RoleList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_role_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_role_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_role_for_all_namespaces # noqa: E501
list or watch objects of kind Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_role_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/roles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_cluster_role(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_role # noqa: E501
partially update the specified ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_role(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRole
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_cluster_role_with_http_info(name, body, **kwargs) # noqa: E501
def patch_cluster_role_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_role # noqa: E501
partially update the specified ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_role_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRole, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_cluster_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRole', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_cluster_role_binding(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_role_binding # noqa: E501
partially update the specified ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_role_binding(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_cluster_role_binding_with_http_info(name, body, **kwargs) # noqa: E501
def patch_cluster_role_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_role_binding # noqa: E501
partially update the specified ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_role_binding_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_cluster_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRoleBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_role(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_role # noqa: E501
partially update the specified Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1Role
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_role_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_role # noqa: E501
partially update the specified Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1Role, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1Role', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_role_binding(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_role_binding # noqa: E501
partially update the specified RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role_binding(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1RoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_role_binding # noqa: E501
partially update the specified RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role_binding_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1RoleBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role_binding`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role_binding`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1RoleBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_cluster_role(self, name, **kwargs): # noqa: E501
"""read_cluster_role # noqa: E501
read the specified ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_role(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRole
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_cluster_role_with_http_info(name, **kwargs) # noqa: E501
def read_cluster_role_with_http_info(self, name, **kwargs): # noqa: E501
"""read_cluster_role # noqa: E501
read the specified ClusterRole # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_role_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRole (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRole, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_cluster_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRole', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_cluster_role_binding(self, name, **kwargs): # noqa: E501
"""read_cluster_role_binding # noqa: E501
read the specified ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_role_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1ClusterRoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
def read_cluster_role_binding_with_http_info(self, name, **kwargs): # noqa: E501
"""read_cluster_role_binding # noqa: E501
read the specified ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_role_binding_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_cluster_role_binding" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role_binding`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1ClusterRoleBinding', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_role(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_role # noqa: E501
read the specified Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_role(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1Role
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_role_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_role_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_role # noqa: E501
read the specified Role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_role_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1Role, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_role" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_role`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_role`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1Role', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_role_binding(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified RoleBinding and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the RoleBinding (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1alpha1RoleBinding
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.read_namespaced_role_binding_with_http_info(name, namespace, **kwargs)  # noqa: E501
def read_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified RoleBinding, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the RoleBinding (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1alpha1RoleBinding, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'name': name, 'namespace': namespace}
    recognized = frozenset([
        'name', 'namespace', 'pretty',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_role_binding" % key)
        params[key] = val

    if self.api_client.client_side_validation:
        # Both path components are mandatory on the wire.
        for required in ('name', 'namespace'):
            if params[required] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`read_namespaced_role_binding`" % required)

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1RoleBinding',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_cluster_role(self, name, body, **kwargs):  # noqa: E501
    """Replace the specified ClusterRole and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the ClusterRole (required)
    :param V1alpha1ClusterRole body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1alpha1ClusterRole
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.replace_cluster_role_with_http_info(name, body, **kwargs)  # noqa: E501
def replace_cluster_role_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """Replace the specified ClusterRole, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the ClusterRole (required)
    :param V1alpha1ClusterRole body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1alpha1ClusterRole, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'name': name, 'body': body}
    recognized = frozenset([
        'name', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_cluster_role" % key)
        params[key] = val

    if self.api_client.client_side_validation:
        # Both positional arguments are mandatory on the wire.
        for required in ('name', 'body'):
            if params[required] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_cluster_role`" % required)

    path_params = {'name': params['name']}

    # Translate snake_case python names to their camelCase wire names.
    query_params = []
    for py_key, wire_key in (('pretty', 'pretty'),
                             ('dry_run', 'dryRun'),
                             ('field_manager', 'fieldManager')):
        if params.get(py_key) is not None:
            query_params.append((wire_key, params[py_key]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ClusterRole',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_cluster_role_binding(self, name, body, **kwargs):  # noqa: E501
    """Replace the specified ClusterRoleBinding and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the ClusterRoleBinding (required)
    :param V1alpha1ClusterRoleBinding body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1alpha1ClusterRoleBinding
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.replace_cluster_role_binding_with_http_info(name, body, **kwargs)  # noqa: E501
def replace_cluster_role_binding_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """Replace the specified ClusterRoleBinding, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the ClusterRoleBinding (required)
    :param V1alpha1ClusterRoleBinding body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1alpha1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'name': name, 'body': body}
    recognized = frozenset([
        'name', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_cluster_role_binding" % key)
        params[key] = val

    if self.api_client.client_side_validation:
        # Both positional arguments are mandatory on the wire.
        for required in ('name', 'body'):
            if params[required] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_cluster_role_binding`" % required)

    path_params = {'name': params['name']}

    # Translate snake_case python names to their camelCase wire names.
    query_params = []
    for py_key, wire_key in (('pretty', 'pretty'),
                             ('dry_run', 'dryRun'),
                             ('field_manager', 'fieldManager')):
        if params.get(py_key) is not None:
            query_params.append((wire_key, params[py_key]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ClusterRoleBinding',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_role(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified Role and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the Role (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1Role body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1alpha1Role
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_role_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_role_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified Role, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the Role (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1Role body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1alpha1Role, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'name': name, 'namespace': namespace, 'body': body}
    recognized = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_role" % key)
        params[key] = val

    if self.api_client.client_side_validation:
        # All three positional arguments are mandatory on the wire.
        for required in ('name', 'namespace', 'body'):
            if params[required] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_role`" % required)

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Translate snake_case python names to their camelCase wire names.
    query_params = []
    for py_key, wire_key in (('pretty', 'pretty'),
                             ('dry_run', 'dryRun'),
                             ('field_manager', 'fieldManager')):
        if params.get(py_key) is not None:
            query_params.append((wire_key, params[py_key]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1Role',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_role_binding(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified RoleBinding and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the RoleBinding (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1RoleBinding body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1alpha1RoleBinding
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified RoleBinding, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param str name: name of the RoleBinding (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1RoleBinding body: the replacement object (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1alpha1RoleBinding, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'name': name, 'namespace': namespace, 'body': body}
    recognized = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_role_binding" % key)
        params[key] = val

    if self.api_client.client_side_validation:
        # All three positional arguments are mandatory on the wire.
        for required in ('name', 'namespace', 'body'):
            if params[required] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_role_binding`" % required)

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Translate snake_case python names to their camelCase wire names.
    query_params = []
    for py_key, wire_key in (('pretty', 'pretty'),
                             ('dry_run', 'dryRun'),
                             ('field_manager', 'fieldManager')):
        if params.get(py_key) is not None:
            query_params.append((wire_key, params[py_key]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1RoleBinding',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 73.355962 | 1,390 | 0.674574 |
from __future__ import absolute_import
import re
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import (
ApiTypeError,
ApiValueError
)
class RbacAuthorizationV1alpha1Api(object):
def __init__(self, api_client=None):
    """Bind this API group to *api_client*, creating a default client if None."""
    self.api_client = ApiClient() if api_client is None else api_client
def create_cluster_role(self, body, **kwargs):
    """Create a ClusterRole and return the deserialized object.

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param V1alpha1ClusterRole body: the object to create (required)
    :return: V1alpha1ClusterRole
    """
    # The *_with_http_info variant returns (data, status, headers); this
    # convenience wrapper strips that down to just the data.
    kwargs.update(_return_http_data_only=True)
    return self.create_cluster_role_with_http_info(body, **kwargs)
def create_cluster_role_with_http_info(self, body, **kwargs):
    """Create a ClusterRole, returning (data, status, headers).

    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result instead.

    :param V1alpha1ClusterRole body: the object to create (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
                        the only valid value is 'All'
    :param str field_manager: name associated with the actor or entity
                              making these changes
    :return: tuple(V1alpha1ClusterRole, status_code(int), headers(HTTPHeaderDict))
    """
    params = {'body': body}
    recognized = frozenset([
        'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_cluster_role" % key)
        params[key] = val

    if self.api_client.client_side_validation and params['body'] is None:
        raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role`")

    # Translate snake_case python names to their camelCase wire names.
    query_params = []
    for py_key, wire_key in (('pretty', 'pretty'),
                             ('dry_run', 'dryRun'),
                             ('field_manager', 'fieldManager')):
        if params.get(py_key) is not None:
            query_params.append((wire_key, params[py_key]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'POST',
        {},
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ClusterRole',
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_cluster_role_binding(self, body, **kwargs):
    """Create a ClusterRoleBinding.

    Convenience wrapper around ``create_cluster_role_binding_with_http_info``
    that forces ``_return_http_data_only`` so only the deserialized response
    data is returned (no status code / headers tuple).

    :param body: the ClusterRoleBinding object to create (required).
    :return: the created object data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.create_cluster_role_binding_with_http_info(body, **forwarded)
def create_cluster_role_binding_with_http_info(self, body, **kwargs):
    """Create a ClusterRoleBinding and return the full HTTP response info.

    POSTs *body* to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings``.

    :param body: the object to create (required when client-side
        validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param field_manager: sent as the ``fieldManager`` query parameter
        if not None.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1alpha1ClusterRoleBinding``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``body`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_cluster_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role_binding`")
    collection_formats = {}
    path_params = {}
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1ClusterRoleBinding',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_namespaced_role(self, namespace, body, **kwargs):
    """Create a Role in the given namespace.

    Convenience wrapper around ``create_namespaced_role_with_http_info``
    that forces ``_return_http_data_only`` so only the deserialized
    response data is returned.

    :param namespace: target namespace.
    :param body: the Role object to create (required).
    :return: the created object data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_role_with_http_info(namespace, body, **forwarded)
def create_namespaced_role_with_http_info(self, namespace, body, **kwargs):
    """Create a Role and return the full HTTP response info.

    POSTs *body* to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles``.

    :param namespace: target namespace (required when client-side
        validation is enabled).
    :param body: the object to create (required when client-side
        validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param field_manager: sent as the ``fieldManager`` query parameter
        if not None.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1alpha1Role``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``namespace`` or ``body`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role`")
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1Role',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_namespaced_role_binding(self, namespace, body, **kwargs):
    """Create a RoleBinding in the given namespace.

    Convenience wrapper around
    ``create_namespaced_role_binding_with_http_info`` that forces
    ``_return_http_data_only`` so only the deserialized response data
    is returned.

    :param namespace: target namespace.
    :param body: the RoleBinding object to create (required).
    :return: the created object data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_role_binding_with_http_info(namespace, body, **forwarded)
def create_namespaced_role_binding_with_http_info(self, namespace, body, **kwargs):
    """Create a RoleBinding and return the full HTTP response info.

    POSTs *body* to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings``.

    :param namespace: target namespace (required when client-side
        validation is enabled).
    :param body: the object to create (required when client-side
        validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param field_manager: sent as the ``fieldManager`` query parameter
        if not None.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1alpha1RoleBinding``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``namespace`` or ``body`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role_binding`")
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1RoleBinding',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_cluster_role(self, name, **kwargs):
    """Delete the named ClusterRole.

    Convenience wrapper around ``delete_cluster_role_with_http_info``
    that forces ``_return_http_data_only`` so only the deserialized
    response data is returned.

    :param name: name of the ClusterRole to delete.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_cluster_role_with_http_info(name, **forwarded)
def delete_cluster_role_with_http_info(self, name, **kwargs):
    """Delete a ClusterRole and return the full HTTP response info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}``.

    :param name: name of the ClusterRole (required when client-side
        validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param grace_period_seconds: sent as ``gracePeriodSeconds`` if not None.
    :param orphan_dependents: sent as ``orphanDependents`` if not None.
    :param propagation_policy: sent as ``propagationPolicy`` if not None.
    :param body: optional delete-options request body.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``name`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_cluster_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_cluster_role_binding(self, name, **kwargs):
    """Delete the named ClusterRoleBinding.

    Convenience wrapper around
    ``delete_cluster_role_binding_with_http_info`` that forces
    ``_return_http_data_only`` so only the deserialized response data
    is returned.

    :param name: name of the ClusterRoleBinding to delete.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_cluster_role_binding_with_http_info(name, **forwarded)
def delete_cluster_role_binding_with_http_info(self, name, **kwargs):
    """Delete a ClusterRoleBinding and return the full HTTP response info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}``.

    :param name: name of the ClusterRoleBinding (required when
        client-side validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param grace_period_seconds: sent as ``gracePeriodSeconds`` if not None.
    :param orphan_dependents: sent as ``orphanDependents`` if not None.
    :param propagation_policy: sent as ``propagationPolicy`` if not None.
    :param body: optional delete-options request body.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``name`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_cluster_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_cluster_role(self, **kwargs):
    """Delete a collection of ClusterRoles.

    Convenience wrapper around
    ``delete_collection_cluster_role_with_http_info`` that forces
    ``_return_http_data_only`` so only the deserialized response data
    is returned.

    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_cluster_role_with_http_info(**forwarded)
def delete_collection_cluster_role_with_http_info(self, **kwargs):
    """Delete a collection of ClusterRoles; return full HTTP response info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles``.

    Optional keyword parameters are forwarded as query parameters when
    not None: ``pretty``, ``_continue`` (sent as ``continue``),
    ``dry_run`` (``dryRun``), ``field_selector`` (``fieldSelector``),
    ``grace_period_seconds`` (``gracePeriodSeconds``),
    ``label_selector`` (``labelSelector``), ``limit``,
    ``orphan_dependents`` (``orphanDependents``),
    ``propagation_policy`` (``propagationPolicy``),
    ``resource_version`` (``resourceVersion``),
    ``timeout_seconds`` (``timeoutSeconds``). ``body`` is the optional
    delete-options request body.

    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    """
    # Snapshot locals() first; validated **kwargs entries are merged in
    # below and then addressed by string key.
    local_var_params = locals()
    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'timeout_seconds',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_cluster_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_cluster_role_binding(self, **kwargs):
    """Delete a collection of ClusterRoleBindings.

    Convenience wrapper around
    ``delete_collection_cluster_role_binding_with_http_info`` that
    forces ``_return_http_data_only`` so only the deserialized
    response data is returned.

    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_cluster_role_binding_with_http_info(**forwarded)
def delete_collection_cluster_role_binding_with_http_info(self, **kwargs):
    """Delete a collection of ClusterRoleBindings; return full HTTP info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings``.

    Optional keyword parameters are forwarded as query parameters when
    not None: ``pretty``, ``_continue`` (sent as ``continue``),
    ``dry_run`` (``dryRun``), ``field_selector`` (``fieldSelector``),
    ``grace_period_seconds`` (``gracePeriodSeconds``),
    ``label_selector`` (``labelSelector``), ``limit``,
    ``orphan_dependents`` (``orphanDependents``),
    ``propagation_policy`` (``propagationPolicy``),
    ``resource_version`` (``resourceVersion``),
    ``timeout_seconds`` (``timeoutSeconds``). ``body`` is the optional
    delete-options request body.

    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    """
    # Snapshot locals() first; validated **kwargs entries are merged in
    # below and then addressed by string key.
    local_var_params = locals()
    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'timeout_seconds',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_cluster_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_namespaced_role(self, namespace, **kwargs):
    """Delete a collection of Roles in the given namespace.

    Convenience wrapper around
    ``delete_collection_namespaced_role_with_http_info`` that forces
    ``_return_http_data_only`` so only the deserialized response data
    is returned.

    :param namespace: target namespace.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_role_with_http_info(namespace, **forwarded)
def delete_collection_namespaced_role_with_http_info(self, namespace, **kwargs):
    """Delete a collection of namespaced Roles; return full HTTP info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles``.

    :param namespace: target namespace (required when client-side
        validation is enabled).

    Optional keyword parameters are forwarded as query parameters when
    not None: ``pretty``, ``_continue`` (sent as ``continue``),
    ``dry_run`` (``dryRun``), ``field_selector`` (``fieldSelector``),
    ``grace_period_seconds`` (``gracePeriodSeconds``),
    ``label_selector`` (``labelSelector``), ``limit``,
    ``orphan_dependents`` (``orphanDependents``),
    ``propagation_policy`` (``propagationPolicy``),
    ``resource_version`` (``resourceVersion``),
    ``timeout_seconds`` (``timeoutSeconds``). ``body`` is the optional
    delete-options request body.

    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``namespace`` is missing/None.
    """
    # Snapshot locals() first; validated **kwargs entries are merged in
    # below and then addressed by string key.
    local_var_params = locals()
    all_params = [
        'namespace',
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'timeout_seconds',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_namespaced_role_binding(self, namespace, **kwargs):
    """Delete a collection of RoleBindings in the given namespace.

    Convenience wrapper around
    ``delete_collection_namespaced_role_binding_with_http_info`` that
    forces ``_return_http_data_only`` so only the deserialized
    response data is returned.

    :param namespace: target namespace.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_role_binding_with_http_info(namespace, **forwarded)
def delete_collection_namespaced_role_binding_with_http_info(self, namespace, **kwargs):
    """Delete a collection of namespaced RoleBindings; return full HTTP info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings``.

    :param namespace: target namespace (required when client-side
        validation is enabled).

    Optional keyword parameters are forwarded as query parameters when
    not None: ``pretty``, ``_continue`` (sent as ``continue``),
    ``dry_run`` (``dryRun``), ``field_selector`` (``fieldSelector``),
    ``grace_period_seconds`` (``gracePeriodSeconds``),
    ``label_selector`` (``labelSelector``), ``limit``,
    ``orphan_dependents`` (``orphanDependents``),
    ``propagation_policy`` (``propagationPolicy``),
    ``resource_version`` (``resourceVersion``),
    ``timeout_seconds`` (``timeoutSeconds``). ``body`` is the optional
    delete-options request body.

    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``namespace`` is missing/None.
    """
    # Snapshot locals() first; validated **kwargs entries are merged in
    # below and then addressed by string key.
    local_var_params = locals()
    all_params = [
        'namespace',
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'timeout_seconds',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_role(self, name, namespace, **kwargs):
    """Delete the named Role in the given namespace.

    Convenience wrapper around ``delete_namespaced_role_with_http_info``
    that forces ``_return_http_data_only`` so only the deserialized
    response data is returned.

    :param name: name of the Role to delete.
    :param namespace: namespace containing the Role.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_role_with_http_info(name, namespace, **forwarded)
def delete_namespaced_role_with_http_info(self, name, namespace, **kwargs):
    """Delete a namespaced Role and return the full HTTP response info.

    Sends DELETE to
    ``/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}``.

    :param name: name of the Role (required when client-side validation
        is enabled).
    :param namespace: namespace containing the Role (required when
        client-side validation is enabled).
    :param pretty: sent as the ``pretty`` query parameter if not None.
    :param dry_run: sent as the ``dryRun`` query parameter if not None.
    :param grace_period_seconds: sent as ``gracePeriodSeconds`` if not None.
    :param orphan_dependents: sent as ``orphanDependents`` if not None.
    :param propagation_policy: sent as ``propagationPolicy`` if not None.
    :param body: optional delete-options request body.
    :return: whatever ``api_client.call_api`` returns, deserialized as
        ``V1Status``.
    :raises ApiTypeError: on an unexpected keyword argument.
    :raises ApiValueError: if ``name`` or ``namespace`` is missing/None.
    """
    # Snapshot locals() first: the dict then maps declared parameter
    # names to values; validated **kwargs entries are merged in below.
    local_var_params = locals()
    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    # Internal transport flags accepted alongside the API parameters.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role`")
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Only parameters explicitly provided (non-None) become query params.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_role_binding(self, name, namespace, **kwargs):
    """Delete the named RoleBinding in the given namespace.

    Convenience wrapper around
    ``delete_namespaced_role_binding_with_http_info`` that forces
    ``_return_http_data_only`` so only the deserialized response data
    is returned.

    :param name: name of the RoleBinding to delete.
    :param namespace: namespace containing the RoleBinding.
    :return: the response data (or the request thread if
        ``async_req=True`` is passed through).
    """
    # Forward everything unchanged except the data-only flag.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_role_binding_with_http_info(name, namespace, **forwarded)
def delete_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs):
    """Delete a v1alpha1 RoleBinding; returns (data, HTTP status, headers).

    Args:
        name: name of the RoleBinding (required).
        namespace: object name and auth scope of the request (required).
        **kwargs: optional API params (pretty, dry_run, grace_period_seconds,
            orphan_dependents, propagation_policy, body) plus client controls
            (async_req, _return_http_data_only, _preload_content,
            _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if a required parameter is missing (when
            client-side validation is enabled).
    """
    # locals() must run first: it snapshots name/namespace/kwargs so the
    # generated code below can look parameters up by string key.
    local_var_params = locals()
    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of required path parameters.
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role_binding`")
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
    """Return the available rbac v1alpha1 API resources (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_api_resources_with_http_info(**forwarded)
def get_api_resources_with_http_info(self, **kwargs):
    """Get available rbac v1alpha1 resources; returns (data, status, headers).

    Args:
        **kwargs: client controls only (async_req, _return_http_data_only,
            _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
    """
    # locals() must run first: it snapshots kwargs for name-based lookup.
    local_var_params = locals()
    # This endpoint takes no API parameters, only client controls.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_cluster_role(self, **kwargs):
    """List or watch v1alpha1 ClusterRole objects (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_cluster_role_with_http_info(**forwarded)
def list_cluster_role_with_http_info(self, **kwargs):
    """List/watch v1alpha1 ClusterRoles; returns (data, status, headers).

    Args:
        **kwargs: optional API params (pretty, allow_watch_bookmarks,
            _continue, field_selector, label_selector, limit,
            resource_version, timeout_seconds, watch) plus client controls
            (async_req, _return_http_data_only, _preload_content,
            _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
    """
    # locals() must run first: it snapshots kwargs for name-based lookup.
    local_var_params = locals()
    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1ClusterRoleList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_cluster_role_binding(self, **kwargs):
    """List or watch v1alpha1 ClusterRoleBinding objects (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_cluster_role_binding_with_http_info(**forwarded)
def list_cluster_role_binding_with_http_info(self, **kwargs):
    """List/watch v1alpha1 ClusterRoleBindings; returns (data, status, headers).

    Args:
        **kwargs: optional API params (pretty, allow_watch_bookmarks,
            _continue, field_selector, label_selector, limit,
            resource_version, timeout_seconds, watch) plus client controls
            (async_req, _return_http_data_only, _preload_content,
            _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
    """
    # locals() must run first: it snapshots kwargs for name-based lookup.
    local_var_params = locals()
    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1ClusterRoleBindingList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_role(self, namespace, **kwargs):
    """List or watch v1alpha1 Roles in a namespace (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_namespaced_role_with_http_info(namespace, **forwarded)
def list_namespaced_role_with_http_info(self, namespace, **kwargs):
    """List/watch v1alpha1 Roles in a namespace; returns (data, status, headers).

    Args:
        namespace: object name and auth scope of the request (required).
        **kwargs: optional API params (pretty, allow_watch_bookmarks,
            _continue, field_selector, label_selector, limit,
            resource_version, timeout_seconds, watch) plus client controls
            (async_req, _return_http_data_only, _preload_content,
            _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if `namespace` is missing (when client-side
            validation is enabled).
    """
    # locals() must run first: it snapshots namespace/kwargs for
    # name-based lookup.
    local_var_params = locals()
    all_params = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of the required path parameter.
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1RoleList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_role_binding(self, namespace, **kwargs):
    """List or watch v1alpha1 RoleBindings in a namespace (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_namespaced_role_binding_with_http_info(namespace, **forwarded)
def list_namespaced_role_binding_with_http_info(self, namespace, **kwargs):
    """List/watch v1alpha1 RoleBindings in a namespace; returns (data, status, headers).

    Args:
        namespace: object name and auth scope of the request (required).
        **kwargs: optional API params (pretty, allow_watch_bookmarks,
            _continue, field_selector, label_selector, limit,
            resource_version, timeout_seconds, watch) plus client controls
            (async_req, _return_http_data_only, _preload_content,
            _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if `namespace` is missing (when client-side
            validation is enabled).
    """
    # locals() must run first: it snapshots namespace/kwargs for
    # name-based lookup.
    local_var_params = locals()
    all_params = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of the required path parameter.
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1RoleBindingList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_role_binding_for_all_namespaces(self, **kwargs):
    """List or watch v1alpha1 RoleBindings across all namespaces (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_role_binding_for_all_namespaces_with_http_info(**forwarded)
def list_role_binding_for_all_namespaces_with_http_info(self, **kwargs):
    """List/watch v1alpha1 RoleBindings cluster-wide; returns (data, status, headers).

    Args:
        **kwargs: optional API params (allow_watch_bookmarks, _continue,
            field_selector, label_selector, limit, pretty, resource_version,
            timeout_seconds, watch) plus client controls (async_req,
            _return_http_data_only, _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
    """
    # locals() must run first: it snapshots kwargs for name-based lookup.
    local_var_params = locals()
    all_params = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_role_binding_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/rolebindings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1RoleBindingList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_role_for_all_namespaces(self, **kwargs):
    """List or watch v1alpha1 Roles across all namespaces (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_role_for_all_namespaces_with_http_info(**forwarded)
def list_role_for_all_namespaces_with_http_info(self, **kwargs):
    """List/watch v1alpha1 Roles cluster-wide; returns (data, status, headers).

    Args:
        **kwargs: optional API params (allow_watch_bookmarks, _continue,
            field_selector, label_selector, limit, pretty, resource_version,
            timeout_seconds, watch) plus client controls (async_req,
            _return_http_data_only, _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
    """
    # locals() must run first: it snapshots kwargs for name-based lookup.
    local_var_params = locals()
    all_params = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_role_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))
    if 'limit' in local_var_params and local_var_params['limit'] is not None:
        query_params.append(('limit', local_var_params['limit']))
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
    if 'watch' in local_var_params and local_var_params['watch'] is not None:
        query_params.append(('watch', local_var_params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # List endpoints also advertise the streaming (watch) media types.
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/roles', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1RoleList',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_cluster_role(self, name, body, **kwargs):
    """Partially update a v1alpha1 ClusterRole (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.patch_cluster_role_with_http_info(name, body, **forwarded)
def patch_cluster_role_with_http_info(self, name, body, **kwargs):
    """Partially update a v1alpha1 ClusterRole; returns (data, status, headers).

    Args:
        name: name of the ClusterRole (required).
        body: the patch document to apply (required).
        **kwargs: optional API params (pretty, dry_run, field_manager,
            force) plus client controls (async_req, _return_http_data_only,
            _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if a required parameter is missing (when
            client-side validation is enabled).
    """
    # locals() must run first: it snapshots name/body/kwargs for
    # name-based lookup.
    local_var_params = locals()
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'force'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_cluster_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role`")
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    if 'force' in local_var_params and local_var_params['force'] is not None:
        query_params.append(('force', local_var_params['force']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # PATCH requests must declare one of the supported patch media types.
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1ClusterRole',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_cluster_role_binding(self, name, body, **kwargs):
    """Partially update a v1alpha1 ClusterRoleBinding (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.patch_cluster_role_binding_with_http_info(name, body, **forwarded)
def patch_cluster_role_binding_with_http_info(self, name, body, **kwargs):
    """Partially update a v1alpha1 ClusterRoleBinding; returns (data, status, headers).

    Args:
        name: name of the ClusterRoleBinding (required).
        body: the patch document to apply (required).
        **kwargs: optional API params (pretty, dry_run, field_manager,
            force) plus client controls (async_req, _return_http_data_only,
            _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if a required parameter is missing (when
            client-side validation is enabled).
    """
    # locals() must run first: it snapshots name/body/kwargs for
    # name-based lookup.
    local_var_params = locals()
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'force'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_cluster_role_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role_binding`")
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role_binding`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    if 'force' in local_var_params and local_var_params['force'] is not None:
        query_params.append(('force', local_var_params['force']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # PATCH requests must declare one of the supported patch media types.
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1ClusterRoleBinding',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_role(self, name, namespace, body, **kwargs):
    """Partially update a namespaced v1alpha1 Role (data only).

    Forces ``_return_http_data_only`` and delegates to the
    ``_with_http_info`` variant.
    """
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_role_with_http_info(
        name, namespace, body, **forwarded)
def patch_namespaced_role_with_http_info(self, name, namespace, body, **kwargs):
    """Partially update a namespaced v1alpha1 Role; returns (data, status, headers).

    Args:
        name: name of the Role (required).
        namespace: object name and auth scope of the request (required).
        body: the patch document to apply (required).
        **kwargs: optional API params (pretty, dry_run, field_manager,
            force) plus client controls (async_req, _return_http_data_only,
            _preload_content, _request_timeout).

    Raises:
        ApiTypeError: on an unexpected keyword argument.
        ApiValueError: if a required parameter is missing (when
            client-side validation is enabled).
    """
    # locals() must run first: it snapshots name/namespace/body/kwargs for
    # name-based lookup.
    local_var_params = locals()
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'force'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )
    # Reject unknown kwargs, then flatten accepted ones into the snapshot.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                   local_var_params['name'] is None):
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role`")
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                   local_var_params['namespace'] is None):
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role`")
    if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                   local_var_params['body'] is None):
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role`")
    collection_formats = {}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    # Map snake_case python params onto the API's camelCase query string.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
        query_params.append(('dryRun', local_var_params['dry_run']))
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
        query_params.append(('fieldManager', local_var_params['field_manager']))
    if 'force' in local_var_params and local_var_params['force'] is not None:
        query_params.append(('force', local_var_params['force']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # PATCH requests must declare one of the supported patch media types.
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1Role',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_role_binding(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs)
    def patch_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH ``.../v1alpha1/namespaces/{namespace}/rolebindings/{name}``.

        :param name: RoleBinding name (path parameter, required)
        :param namespace: namespace (path parameter, required)
        :param body: patch document (JSON/merge/strategic-merge/apply patch)
        :param pretty: forwarded as the ``pretty`` query parameter
        :param dry_run: forwarded as ``dryRun``
        :param field_manager: forwarded as ``fieldManager``
        :param force: forwarded as ``force``
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1RoleBinding``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'force'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role_binding`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                        local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role_binding`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                        local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role_binding`")
        # Assemble the path/query/header/body pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        if 'force' in local_var_params and local_var_params['force'] is not None:
            query_params.append(('force', local_var_params['force']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # Only PATCH endpoints negotiate a patch-specific Content-Type.
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RoleBinding',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_cluster_role(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_cluster_role_with_http_info(name, **kwargs)
    def read_cluster_role_with_http_info(self, name, **kwargs):
        """GET ``.../v1alpha1/clusterroles/{name}``.

        :param name: ClusterRole name (path parameter, required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1ClusterRole``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when ``name`` is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'pretty'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_cluster_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role`")
        # Assemble the path/query/header pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1ClusterRole',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_cluster_role_binding(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_cluster_role_binding_with_http_info(name, **kwargs)
    def read_cluster_role_binding_with_http_info(self, name, **kwargs):
        """GET ``.../v1alpha1/clusterrolebindings/{name}``.

        :param name: ClusterRoleBinding name (path parameter, required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1ClusterRoleBinding``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when ``name`` is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'pretty'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_cluster_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role_binding`")
        # Assemble the path/query/header pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1ClusterRoleBinding',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_role(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_namespaced_role_with_http_info(name, namespace, **kwargs)
    def read_namespaced_role_with_http_info(self, name, namespace, **kwargs):
        """GET ``.../v1alpha1/namespaces/{namespace}/roles/{name}``.

        :param name: Role name (path parameter, required)
        :param namespace: namespace (path parameter, required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1Role``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_role`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                        local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_role`")
        # Assemble the path/query/header pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1Role',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_role_binding(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_namespaced_role_binding_with_http_info(name, namespace, **kwargs)
    def read_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs):
        """GET ``.../v1alpha1/namespaces/{namespace}/rolebindings/{name}``.

        :param name: RoleBinding name (path parameter, required)
        :param namespace: namespace (path parameter, required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1RoleBinding``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_role_binding`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                        local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_role_binding`")
        # Assemble the path/query/header pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RoleBinding',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_cluster_role(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_cluster_role_with_http_info(name, body, **kwargs)
    def replace_cluster_role_with_http_info(self, name, body, **kwargs):
        """PUT ``.../v1alpha1/clusterroles/{name}``.

        :param name: ClusterRole name (path parameter, required)
        :param body: full replacement object (required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :param dry_run: forwarded as ``dryRun``
        :param field_manager: forwarded as ``fieldManager``
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1ClusterRole``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_cluster_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_role`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                        local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_role`")
        # Assemble the path/query/header/body pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/clusterroles/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1ClusterRole',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_cluster_role_binding(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_cluster_role_binding_with_http_info(name, body, **kwargs)
    def replace_cluster_role_binding_with_http_info(self, name, body, **kwargs):
        """PUT ``.../v1alpha1/clusterrolebindings/{name}``.

        :param name: ClusterRoleBinding name (path parameter, required)
        :param body: full replacement object (required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :param dry_run: forwarded as ``dryRun``
        :param field_manager: forwarded as ``fieldManager``
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1ClusterRoleBinding``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_cluster_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_role_binding`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                        local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_role_binding`")
        # Assemble the path/query/header/body pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/clusterrolebindings/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1ClusterRoleBinding',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_role(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_role_with_http_info(name, namespace, body, **kwargs)
    def replace_namespaced_role_with_http_info(self, name, namespace, body, **kwargs):
        """PUT ``.../v1alpha1/namespaces/{namespace}/roles/{name}``.

        :param name: Role name (path parameter, required)
        :param namespace: namespace (path parameter, required)
        :param body: full replacement object (required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :param dry_run: forwarded as ``dryRun``
        :param field_manager: forwarded as ``fieldManager``
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1Role``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_role`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                        local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_role`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                        local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_role`")
        # Assemble the path/query/header/body pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/roles/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1Role',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_role_binding(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs)
    def replace_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs):
        """PUT ``.../v1alpha1/namespaces/{namespace}/rolebindings/{name}``.

        :param name: RoleBinding name (path parameter, required)
        :param namespace: namespace (path parameter, required)
        :param body: full replacement object (required)
        :param pretty: forwarded as the ``pretty`` query parameter
        :param dry_run: forwarded as ``dryRun``
        :param field_manager: forwarded as ``fieldManager``
        :return: result of ``ApiClient.call_api`` deserialized as
                 ``V1alpha1RoleBinding``
        :raises ApiTypeError: on an unrecognised keyword argument
        :raises ApiValueError: when a required parameter is missing
        """
        # Snapshot the declared parameters; this must remain the very first
        # statement so the dict holds only this method's arguments + 'kwargs'.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        # Transport-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments; merge recognised ones.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                        local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_role_binding`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                        local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_role_binding`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                        local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_role_binding`")
        # Assemble the path/query/header/body pieces of the request.
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1alpha1/namespaces/{namespace}/rolebindings/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RoleBinding',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| true | true |
f733282a2a00b8956c51e6558ce4316df6e89b60 | 3,996 | py | Python | src/main/resources/license.xlr/LicenseUsageTile.py | xebialabs-community/xlr-license-usage-tile | a15b36b1d84469a00a4d99dba2a4e000b2f685e8 | [
"MIT"
] | null | null | null | src/main/resources/license.xlr/LicenseUsageTile.py | xebialabs-community/xlr-license-usage-tile | a15b36b1d84469a00a4d99dba2a4e000b2f685e8 | [
"MIT"
] | 2 | 2019-12-03T16:10:16.000Z | 2020-02-25T02:15:13.000Z | src/main/resources/license.xlr/LicenseUsageTile.py | xebialabs-community/xlr-license-usage-tile | a15b36b1d84469a00a4d99dba2a4e000b2f685e8 | [
"MIT"
] | null | null | null | #
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Imports and logging setup for the XL Release license-usage tile script.
# NOTE(review): HttpRequest and json are imported but not used in this file's
# visible code -- presumably kept for tile-script conventions; confirm before
# removing.
from xlrelease.HttpRequest import HttpRequest
import logging
import json
from datetime import datetime, timedelta
# All diagnostics go to a dedicated file so the tile does not pollute the
# server's main log; DEBUG level is intentionally verbose.
logging.basicConfig(filename='log/custom-tile.log',
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.DEBUG)
logging.debug("main: begin xlr")
# Convert API user objects into plain dicts, attaching role names per user.
def convert_user_obj_list(objs, user_roles):
    """Flatten user objects into dicts and attach their role names.

    ``user_roles`` maps lower-cased usernames to lists of role names; users
    without an entry receive the placeholder ``["None Assigned"]``.
    """
    converted = []
    logging.debug('convert_user_obj_list...')
    for obj in objs:
        entry = {
            'username': obj.username,
            'fullName': obj.fullName,
            'email': obj.email,
            'loginAllowed': obj.loginAllowed,
        }
        key = obj.username.lower()
        logging.debug('    checking user: %s' % key)
        if key in user_roles:
            entry['roles'] = user_roles[key]
            logging.debug('        found, adding roles: ')
            logging.debug(entry['roles'])
        else:
            logging.debug('    did not find roles for user: %s' % key)
            entry['roles'] = ["None Assigned"]
        converted.append(entry)
    return converted
# calculate cutoff date
# lastUseCutoff is not defined in this file -- presumably injected by the XL
# Release custom-tile runtime as a number of days; TODO confirm.
d = datetime.today() - timedelta(days=lastUseCutoff)
# FYI: parameters are.. String email, String fullName, Boolean loginAllowed, Boolean external, Date lastActiveAfter, Date lastActiveBefore, Long page, Long resultsPerPage
# _userApi / _rolesApi are likewise not defined here -- presumably
# script-injected XL Release API stubs; confirm against the tile runtime.
xlr_active_users_obj = _userApi.findUsers(None, None, None, None, d, None, None, None)
xlr_inactive_users_obj = _userApi.findUsers(None, None, None, None, None, d, None, None)
# get all roles in system (or at least the first thousand)
roles = _rolesApi.getRoles(0, 1000)
# convert that into roles for each user
logging.debug("got %s roles..." % str(len(roles)))
# user_roles: lower-cased username -> list of role names.
user_roles = {}
# active_roles: every role that has at least one principal, with its members.
active_roles = []
for role in roles:
    if len(role.principals) > 0:
        r = {}
        r['name'] = role.name
        r['principals'] = []
        for principal in role.principals:
            username = principal.username.lower()
            r['principals'].append(username)
            if username in user_roles:
                logging.debug("    appending")
                user_roles[username].append(role.name)
            else:
                logging.debug("    adding")
                user_roles[username] = [role.name]
        active_roles.append(r)
# convert to list of dict and add roles
xlr_active_users = convert_user_obj_list(xlr_active_users_obj, user_roles)
xlr_inactive_users = convert_user_obj_list(xlr_inactive_users_obj, user_roles)
# form response
# 'data' is the payload the tile front end consumes: counts plus both user
# lists and the non-empty roles.
data = {
    "usage": {
        "users_active_cnt": len(xlr_active_users),
        "users_inactive_cnt": len(xlr_inactive_users)
    },
    "users_active": xlr_active_users,
    "users_inactive": xlr_inactive_users,
    "active_roles": active_roles
}
| 41.195876 | 462 | 0.676927 |
# Imports and logging setup (comment-stripped copy of the tile script).
# NOTE(review): HttpRequest and json appear unused in the visible code.
from xlrelease.HttpRequest import HttpRequest
import logging
import json
from datetime import datetime, timedelta
# Diagnostics go to a dedicated file at DEBUG level.
logging.basicConfig(filename='log/custom-tile.log',
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.DEBUG)
logging.debug("main: begin xlr")
def convert_user_obj_list(objs, user_roles):
    """Flatten user objects into dicts and attach their role names.

    ``user_roles`` maps lower-cased usernames to lists of role names; users
    without an entry receive the placeholder ``["None Assigned"]``.
    """
    converted = []
    logging.debug('convert_user_obj_list...')
    for obj in objs:
        entry = {
            'username': obj.username,
            'fullName': obj.fullName,
            'email': obj.email,
            'loginAllowed': obj.loginAllowed,
        }
        key = obj.username.lower()
        logging.debug('    checking user: %s' % key)
        if key in user_roles:
            entry['roles'] = user_roles[key]
            logging.debug('        found, adding roles: ')
            logging.debug(entry['roles'])
        else:
            logging.debug('    did not find roles for user: %s' % key)
            entry['roles'] = ["None Assigned"]
        converted.append(entry)
    return converted
# Cutoff date: users active after 'd' count as active. lastUseCutoff is not
# defined in this file -- presumably injected by the XL Release tile runtime.
d = datetime.today() - timedelta(days=lastUseCutoff)
# _userApi / _rolesApi are likewise presumably script-injected API stubs.
xlr_active_users_obj = _userApi.findUsers(None, None, None, None, d, None, None, None)
xlr_inactive_users_obj = _userApi.findUsers(None, None, None, None, None, d, None, None)
# Fetch (up to) the first thousand roles.
roles = _rolesApi.getRoles(0, 1000)
logging.debug("got %s roles..." % str(len(roles)))
# user_roles: lower-cased username -> list of role names.
user_roles = {}
# active_roles: every role with at least one principal, plus its members.
active_roles = []
for role in roles:
    if len(role.principals) > 0:
        r = {}
        r['name'] = role.name
        r['principals'] = []
        for principal in role.principals:
            username = principal.username.lower()
            r['principals'].append(username)
            if username in user_roles:
                logging.debug("    appending")
                user_roles[username].append(role.name)
            else:
                logging.debug("    adding")
                user_roles[username] = [role.name]
        active_roles.append(r)
# Flatten the user objects and attach their role names.
xlr_active_users = convert_user_obj_list(xlr_active_users_obj, user_roles)
xlr_inactive_users = convert_user_obj_list(xlr_inactive_users_obj, user_roles)
# Payload consumed by the tile front end.
data = {
    "usage": {
        "users_active_cnt": len(xlr_active_users),
        "users_inactive_cnt": len(xlr_inactive_users)
    },
    "users_active": xlr_active_users,
    "users_inactive": xlr_inactive_users,
    "active_roles": active_roles
}
| true | true |
f73328553727f0236d71100aa88a52b283b8b71f | 3,421 | py | Python | django_signal_valve/tests/test_valve.py | SHUN-YI/bk-sops | a4a841bdc44a18518c6c53c04a02996ddc7da2be | [
"Apache-2.0"
] | 2 | 2019-08-15T10:06:26.000Z | 2019-09-17T11:49:20.000Z | django_signal_valve/tests/test_valve.py | SHUN-YI/bk-sops | a4a841bdc44a18518c6c53c04a02996ddc7da2be | [
"Apache-2.0"
] | null | null | null | django_signal_valve/tests/test_valve.py | SHUN-YI/bk-sops | a4a841bdc44a18518c6c53c04a02996ddc7da2be | [
"Apache-2.0"
] | 1 | 2020-07-03T06:45:07.000Z | 2020-07-03T06:45:07.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from django.test import TestCase
from django_signal_valve import valve
from django_signal_valve.tests import mock_signal
from django_signal_valve.models import Signal
class TestValve(TestCase):
    """Unit tests for ``django_signal_valve.valve``.

    The valve module holds one module-level "valve function": when it is
    unset or returns False, signals are sent through immediately; when it
    returns True, sends are held back (and persisted as ``Signal`` rows)
    until ``open_valve`` replays them.
    """

    def setUp(self):
        # Start every test with no valve function loaded.
        valve.unload_valve_function()

    def test_set_valve_function(self):
        # A non-callable must be rejected. The argument is passed
        # positionally: assertRaises forwards extra positional args to the
        # callable, so the old form ``args=[1]`` was delivered as an
        # unexpected *keyword* argument and only passed for the wrong reason.
        self.assertRaises(Exception, valve.set_valve_function, 1)

        def func():
            return True

        valve.unload_valve_function()
        valve.set_valve_function(func)
        self.assertEqual(valve.valve_function(), func)
        # Loading a second valve function while one is set must raise.
        self.assertRaises(Exception, valve.set_valve_function, func)
        # Reset via the public API: ``valve.__valve_function = None`` inside
        # a class body is name-mangled to
        # ``valve._TestValve__valve_function`` and silently does nothing.
        valve.unload_valve_function()

    def test_send_on_valve_is_none(self):
        """Without a valve function, sends pass straight through."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}
        valve.unload_valve_function()
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()

    def test_send_on_valve_opened(self):
        """With the valve function reporting "not closed", sends pass through."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}

        def is_valve_closed():
            return False

        valve.unload_valve_function()
        valve.set_valve_function(is_valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()

    def test_send_on_closed(self):
        """With the valve closed, sends are held back, not delivered."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}

        def is_valve_closed():
            return True

        valve.unload_valve_function()
        valve.set_valve_function(is_valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(len(mock_signal.signal_1.history), 0)
        mock_signal.clear()
        # Drop the Signal rows persisted while the valve was closed.
        Signal.objects.all().delete()

    def test_open_valve(self):
        """Opening the valve replays the signals held while it was closed."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}

        def valve_closed():
            return True

        valve.unload_valve_function()
        valve.set_valve_function(valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(len(mock_signal.signal_1.history), 0)
        valve.open_valve(mock_signal)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()
| 36.393617 | 305 | 0.684303 |
from django.test import TestCase
from django_signal_valve import valve
from django_signal_valve.tests import mock_signal
from django_signal_valve.models import Signal
class TestValve(TestCase):
    """Unit tests for the django_signal_valve ``valve`` module."""
    def setUp(self):
        # Ensure no valve function carries over between tests.
        valve.unload_valve_function()
    def test_set_valve_function(self):
        """Non-callables and double registration are rejected."""
        self.assertRaises(Exception, valve.set_valve_function, args=[1])
        def func():
            return True
        valve.unload_valve_function()
        valve.set_valve_function(func)
        self.assertEqual(valve.valve_function(), func)
        self.assertRaises(Exception, valve.set_valve_function, args=[func])
        # NOTE(review): name mangling makes this assignment target
        # valve._TestValve__valve_function -- confirm it resets what was intended.
        valve.__valve_function = None
    def test_send_on_valve_is_none(self):
        """Without a valve function, sends pass straight through."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}
        valve.unload_valve_function()
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()
    def test_send_on_valve_opened(self):
        """An open valve (closed() -> False) forwards signals immediately."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}
        def is_valve_closed():
            return False
        valve.unload_valve_function()
        valve.set_valve_function(is_valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()
    def test_send_on_closed(self):
        """A closed valve holds signals back from the receiver."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}
        def is_valve_closed():
            return True
        valve.unload_valve_function()
        valve.set_valve_function(is_valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(len(mock_signal.signal_1.history), 0)
        mock_signal.clear()
        Signal.objects.all().delete()
    def test_open_valve(self):
        """Opening the valve replays queued signals in send order."""
        kwargs_1 = {'1': 1}
        kwargs_2 = {'2': 2}
        def valve_closed():
            return True
        valve.unload_valve_function()
        valve.set_valve_function(valve_closed)
        valve.send(mock_signal, 'signal_1', **kwargs_1)
        valve.send(mock_signal, 'signal_1', **kwargs_2)
        self.assertEqual(len(mock_signal.signal_1.history), 0)
        valve.open_valve(mock_signal)
        self.assertEqual(mock_signal.signal_1.history[0], kwargs_1)
        self.assertEqual(mock_signal.signal_1.history[1], kwargs_2)
        mock_signal.clear()
| true | true |
f73328c7728db7b0c158a96b040f0fbb97007485 | 4,046 | py | Python | alipay/aop/api/request/KoubeiMarketingCampaignOpenDeliveryDeleteRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/KoubeiMarketingCampaignOpenDeliveryDeleteRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/KoubeiMarketingCampaignOpenDeliveryDeleteRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignOpenDeliveryDeleteModel import KoubeiMarketingCampaignOpenDeliveryDeleteModel
class KoubeiMarketingCampaignOpenDeliveryDeleteRequest(object):
    """Request wrapper for ``koubei.marketing.campaign.open.delivery.delete``.

    Holds the business payload plus the common gateway parameters and
    serializes them into the flat parameter dict via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, KoubeiMarketingCampaignOpenDeliveryDeleteModel):
            converted = value
        else:
            converted = KoubeiMarketingCampaignOpenDeliveryDeleteModel.from_alipay_dict(value)
        self._biz_content = converted

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored, matching the SDK convention.
        if isinstance(value, dict):
            self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Assemble the flat parameter dict sent to the gateway."""
        params = {P_METHOD: 'koubei.marketing.campaign.open.delivery.delete',
                  P_VERSION: self.version}

        def dump(model):
            # Canonical compact JSON so request signatures are stable.
            return json.dumps(obj=model.to_alipay_dict(), ensure_ascii=False,
                              sort_keys=True, separators=(',', ':'))

        if self.biz_model:
            params[P_BIZ_CONTENT] = dump(self.biz_model)
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = dump(self.biz_content)
            else:
                params['biz_content'] = self.biz_content
        for name, value in (('terminal_type', self.terminal_type),
                            ('terminal_info', self.terminal_info),
                            ('prod_code', self.prod_code),
                            ('notify_url', self.notify_url),
                            ('return_url', self.return_url)):
            if value:
                params[name] = value
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads; return an empty dict."""
        return {}
| 27.903448 | 148 | 0.651013 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignOpenDeliveryDeleteModel import KoubeiMarketingCampaignOpenDeliveryDeleteModel
class KoubeiMarketingCampaignOpenDeliveryDeleteRequest(object):
    """Request wrapper for the ``koubei.marketing.campaign.open.delivery.delete`` API."""
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accepts a model instance directly, or a dict to be converted.
        if isinstance(value, KoubeiMarketingCampaignOpenDeliveryDeleteModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiMarketingCampaignOpenDeliveryDeleteModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Assemble the flat parameter dict sent to the gateway.

        Models are serialized as compact, key-sorted JSON so the resulting
        string is canonical.
        """
        params = dict()
        params[P_METHOD] = 'koubei.marketing.campaign.open.delivery.delete'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """This request carries no file uploads; return an empty dict."""
        multipart_params = dict()
        return multipart_params
| true | true |
f7332a528686f1767dfbe9b79555fc71b67c5c1c | 9,566 | py | Python | Integrations/python/deephaven_legacy/lang/vectorize_simple.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null | Integrations/python/deephaven_legacy/lang/vectorize_simple.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null | Integrations/python/deephaven_legacy/lang/vectorize_simple.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null |
"""
Utilities for vectorization.
**The contents of this module are intended only for internal Deephaven use and may change at any time.**
"""
import ast
import traceback
from collections import OrderedDict
import collections
from io import UnsupportedOperation
import numba as nb
import numba.types
import numba.typing
import numpy
from numba.npyufunc import vectorize
from numpy.core.multiarray import ndarray
from deephaven_legacy.lang.analyzer import Analyzer
# Numba scalar type names that map onto NumPy primitive dtypes; results of
# these types can be written into a preallocated numpy array (see the manual
# vectorization path in compile_function).
numpy_primitives = {"float64",
                    "float32",
                    "int64",
                    "int32",
                    "int16",
                    "uint16",
                    "int8",
                    "bool"}
def new_vec_function(src, cols=None, refs=None):
    """
    Creates a new vectorized function for the expression ``src``.

    :param src: Python source of the expression to vectorize.
    :param cols: iterable of (column name, numba type string) pairs.
    :param refs: unused; kept for backward compatibility with callers.
    :return: the compiled invoker callable (the ``'fun'`` entry of
        :func:`compile_function`'s result), or the folded constant
        (``'result'``) when the expression compiles to a literal.
    """
    # compile_function returns a dict, not an (invoker, used_cols) pair; the
    # old tuple-unpacking (and the ``invoker.fun`` attribute access) raised on
    # every call.
    compiled = compile_function(src, cols)
    if 'fun' in compiled:
        return compiled['fun']
    return compiled['result']
def export_locals(glob, loc):
    """Return a copy of ``glob`` overlaid with the entries of ``loc``.

    Keys present in both mappings take their value from ``loc``; neither
    input is modified.
    """
    merged = glob.copy()
    merged.update(loc)
    return merged
def arguments_for(name, type):
    """
    Build the flattened call signature for one input column.

    Array-valued (and string) columns are passed as a flat data vector plus
    an ``int32`` vector of end offsets, so each column contributes one or
    more arguments.

    :param name: column name
    :param type: numba type string for the column, e.g. ``"float64"``,
        ``"int32[:]"`` or ``"unicode_type"``
    :return: tuple of (argument type strings, argument names, unpack statement)
    :raises TypeError: for arrays of strings (``unicode_type[...]``)
    :raises UnsupportedOperation: for arrays of more than one dimension
    """
    dimensions_count = 0
    # Default unpack: scalar columns index straight into the data vector.
    unpack = "{0} = __arg0_{0}__[__iterator_i__]".format(name)
    if "[" in type:
        element_type = type[:type.find("[")]
        # Bug fix: the original compared the *list* of type strings against
        # "unicode_type" (always false), so arrays of strings slipped through.
        if element_type == "unicode_type":
            raise TypeError("Cannot support arrays of String type")
        tail = type[type.find("[") + 1:-1]
        argument_types = [element_type + "[:]"]
        dimensions_count = tail.count(":")
    elif type == "unicode_type":
        # Strings are themselves variable-length, so they use the ragged layout.
        dimensions_count = 1
        argument_types = [type]
    else:
        argument_types = [type + "[:]"]
    if dimensions_count > 0:
        # Ragged layout: element i spans data[offsets[i-1]:offsets[i]].
        unpack = "{0} = __arg0_{0}__[__arg1_{0}__[__iterator_i__-1] if __iterator_i__ > 0 else 0:__arg1_{0}__[__iterator_i__]]".format(
            name)
        if dimensions_count > 1:
            raise UnsupportedOperation("Multidimensional arrays are not supported yet")
    arguments = ["__arg{}_{}__".format(i, name) for i in range(0, dimensions_count + 1)]
    # One int32 offset vector per array dimension.
    for i in range(0, dimensions_count):
        argument_types.append("int32[:]")
    return argument_types, arguments, unpack
def vectorized_arguments(used_cols):
    """
    Build the combined argument signature for every used column.

    :param used_cols: ordered mapping of column name -> numba type string
    :return: (argument type strings, argument names, per-column unpack code)
    """
    all_types = []
    all_names = []
    all_unpacks = []
    for column_name, column_type in used_cols.items():
        col_types, col_names, col_unpack = arguments_for(column_name, column_type)
        all_types.extend(col_types)
        all_names.extend(col_names)
        all_unpacks.append(col_unpack)
    return all_types, all_names, all_unpacks
def compile_function(src, cols=None, globs=None, loc=None):
    """Compile expression ``src`` over columns ``cols`` into a callable.

    :param src: Python source of the expression to compile.
    :param cols: iterable of (column name, numba type string) pairs.
    :param globs: globals dict used when ``exec``-ing generated code.
    :param loc: locals dict used when ``exec``-ing generated code.
    :return: dict with ``'result'``/``'columns'``/``'return_type'`` when
        numba folds the expression to a literal, otherwise
        ``'fun'`` (an invoker wrapping the compiled function),
        ``'columns'`` and ``'return_type'``.
    """
    # extract only the referenced globals
    colsDict = dict(cols)
    cols = colsDict.items()
    # debug: print("\n\nSource:\n{}\n\n".format(src))
    # parse user's source so we can identify used columns and references
    node = ast.parse("{}\n".format(src), '<string>', 'exec')
    analyze = Analyzer(colsDict, None)
    # visit the ast to determine which columns and references are actually used
    analyze.visit(node)
    # NOTE(review): ``fixed`` is module-global so the TypeError handler in
    # ``invoker`` below can include the generated source in its message.
    global fixed
    # Doing plain string manipulation, we need to add indent to the user-supplied source.
    expression = ''.join(' {}'.format(line) for line in src.splitlines(True))
    all_args = []
    used_cols = [name for name, type in cols if name in analyze.used_cols]
    all_args.extend(used_cols)
    # Wrap the user expression in a function taking the used columns as args.
    fixed = (
        "def __tmp__({}):\n"
        " return {}\n").format(",".join(all_args), expression)
    # exec the function-defining code above, saving it into `loc`
    if (loc == None):
        loc = {}
    exec (fixed, globs, loc)
    compiled = loc['__tmp__']
    # apply @vectorize to the result
    types_for_used_columns = [type for name, type in cols if name in analyze.used_cols]
    # numba's @vectorize cannot handle string- or array-typed columns.
    is_vectorize_friendly = not any(type == "unicode_type" or "[" in type for type in types_for_used_columns)
    expression = ''.join(' {}'.format(line) for line in src.splitlines(True))
    used_cols_name_to_type = OrderedDict([(name, type) for name, type in cols if name in analyze.used_cols])
    # Jit once with the column types to learn the expression's return type.
    return_type = numba.jit(["(" + ",".join(types_for_used_columns) + ",)"if len(
        analyze.used_cols) > 0 else ()], nopython=True)(compiled).nopython_signatures[0].return_type
    if isinstance(return_type, numba.types.scalars.Literal):
        # Constant expression: no function needed, return the folded value.
        return {'result': return_type.literal_value, 'columns': used_cols,
                'return_type': return_type.literal_type.name}
    if isinstance(return_type, numba.types.IterableType):
        is_vectorize_friendly = False
    if (is_vectorize_friendly):
        vectorized_function = vectorize(["(" + ",".join(types_for_used_columns) + ",)" if len(
            analyze.used_cols) > 0 else ()])(compiled)
        # debug: print("Signature ")
        # debug: print(list(result._dispatcher.overloads))
        return_type = [sig.return_type for sig in vectorized_function._dispatcher.overloads.keys()][0]
        if isinstance(return_type, numba.types.scalars.Literal):
            return {'result': return_type.literal_value, 'columns': used_cols,
                    'return_type': return_type.literal_type.name}
    if (len(analyze.used_cols) == 0):
        # No columns referenced: generate a loop filling an array with the
        # row-independent expression value, once per requested row.
        fixed = (
            "def __tmp__(__count__):\n"
            " __result__ = numpy.empty(__count__,dtype=numpy.dtype('{}'))\n"
            " for __i__ in range(__count__):\n"
            " __result__[__i__] = {}\n"
            " return __result__\n"
        ).format(return_type, src)
        globs["numpy"] = numpy
        exec (fixed, globs, loc)
        compiled = loc['__tmp__']
        vectorized_function = numba.jit(
            numba.typing.signature(nb.types.Array(return_type, 1, 'C'), (numba.types.int32)))(compiled)
    else:
        # Manual vectorization over flattened column vectors (see
        # vectorized_arguments for the data/offset argument layout).
        argument_types, arguments, unpack = vectorized_arguments(used_cols_name_to_type)
        is_primitive = return_type.name in numpy_primitives
        numpy_return_type_name = return_type.name
        if numpy_return_type_name == "bool":
            numpy_return_type_name = "bool_"
        # Primitive results fill a preallocated numpy array; others a list.
        element_add = "__dest__[__iterator_i__] = {}" if is_primitive else "__dest__.append({})"
        result_init = "__dest__ = {}\n".format("numpy.empty(__arg_count__,numpy.{})".format(numpy_return_type_name) if is_primitive else "[]")
        manually_vectorize_code = "def __tmp__(__arg_count__,{}):\n" \
                                  " {}\n" \
                                  " for __iterator_i__ in range(0,__arg_count__):\n" \
                                  " {}\n" \
                                  " {}\n" \
                                  " return __dest__".format(",".join(arguments), result_init, "\n ".join(unpack), element_add.format(expression))
        #debug print(manually_vectorize_code)
        exec (manually_vectorize_code, globs, loc)
        manually_vectorize = loc['__tmp__']
        # debug print(argument_types)
        argsSig, rt = numba.sigutils.normalize_signature("(int32," + ",".join(argument_types) + ",)" if len(
            argument_types) > 0 else "(int32,)")
        # Mark array arguments immutable so numba can optimise accordingly.
        for i in range(0, len(argsSig)):
            if isinstance(argsSig[i], numba.types.Array):
                argsSig[i].mutable = False
        vectorized_function = numba.jit([argsSig], nopython=True)(
            manually_vectorize)
    # wrap the returned function, so any expected errors come with a meaningful error message.
    def invoker(args):
        try:
            # debug: print("args:")
            # debug: print(len(args))
            # debug: print(list(args))
            args = list(args)
            # debug: print("done args")
            #print(vectorized_function.nopython_signatures[0])
            return vectorized_function.__call__(*args[1:]) if is_vectorize_friendly and len(args) > 1 else vectorized_function.__call__(*args)
        except TypeError:
            traceback.print_exc()
            # determine the types of referenced variables to help user identify their problem.
            types = '\n'.join("ndarray[{}]".format(ar.dtype.name) if
                              hasattr(ar, 'dtype') and isinstance(ar, ndarray)
                              else '{}({})'.format(ar.dtype.name, ar) if
                              hasattr(ar, 'dtype')
                              else "<foo>"
                              for ar in args)
            raise TypeError(("Vectorized functions may only reference primitives and other vectorized functions.\n"
                             "Argument types:\n"
                             "{}\n"
                             "Source:\n"
                             "{}").format(types, fixed))
    return {'fun': invoker, 'columns': used_cols, 'return_type': return_type.name}
| 41.055794 | 155 | 0.589484 |
import ast
import traceback
from collections import OrderedDict
import collections
from io import UnsupportedOperation
import numba as nb
import numba.types
import numba.typing
import numpy
from numba.npyufunc import vectorize
from numpy.core.multiarray import ndarray
from deephaven_legacy.lang.analyzer import Analyzer
# Numba scalar type names that map onto NumPy primitive dtypes; results of
# these types can be written into a preallocated numpy array.
numpy_primitives = {"float64",
                    "float32",
                    "int64",
                    "int32",
                    "int16",
                    "uint16",
                    "int8",
                    "bool"}
def new_vec_function(src, cols=None, refs=None):
    """
    Creates a new vectorized function for the expression ``src``.

    :param src: Python source of the expression to vectorize.
    :param cols: iterable of (column name, numba type string) pairs.
    :param refs: unused; kept for backward compatibility with callers.
    :return: the compiled invoker callable (the ``'fun'`` entry of
        :func:`compile_function`'s result), or the folded constant
        (``'result'``) when the expression compiles to a literal.
    """
    # compile_function returns a dict, not an (invoker, used_cols) pair; the
    # old tuple-unpacking (and the ``invoker.fun`` attribute access) raised on
    # every call.
    compiled = compile_function(src, cols)
    if 'fun' in compiled:
        return compiled['fun']
    return compiled['result']
def export_locals(glob, loc):
    """Return a copy of ``glob`` overlaid with the entries of ``loc``.

    Keys present in both mappings take their value from ``loc``; neither
    input is modified.
    """
    merged = glob.copy()
    merged.update(loc)
    return merged
def arguments_for(name, type):
    """
    Build the flattened call signature for one input column.

    Array-valued (and string) columns are passed as a flat data vector plus
    an ``int32`` vector of end offsets.

    :param name: column name
    :param type: numba type string for the column
    :return: tuple of (argument type strings, argument names, unpack statement)
    :raises TypeError: for arrays of strings (``unicode_type[...]``)
    :raises UnsupportedOperation: for arrays of more than one dimension
    """
    dimensions_count = 0
    # Default unpack: scalar columns index straight into the data vector.
    unpack = "{0} = __arg0_{0}__[__iterator_i__]".format(name)
    if "[" in type:
        element_type = type[:type.find("[")]
        # Bug fix: the original compared the *list* of type strings against
        # "unicode_type" (always false), so arrays of strings slipped through.
        if element_type == "unicode_type":
            raise TypeError("Cannot support arrays of String type")
        tail = type[type.find("[") + 1:-1]
        argument_types = [element_type + "[:]"]
        dimensions_count = tail.count(":")
    elif type == "unicode_type":
        dimensions_count = 1
        argument_types = [type]
    else:
        argument_types = [type + "[:]"]
    if dimensions_count > 0:
        # Ragged layout: element i spans data[offsets[i-1]:offsets[i]].
        unpack = "{0} = __arg0_{0}__[__arg1_{0}__[__iterator_i__-1] if __iterator_i__ > 0 else 0:__arg1_{0}__[__iterator_i__]]".format(
            name)
        if dimensions_count > 1:
            raise UnsupportedOperation("Multidimensional arrays are not supported yet")
    arguments = ["__arg{}_{}__".format(i, name) for i in range(0, dimensions_count + 1)]
    # One int32 offset vector per array dimension.
    for i in range(0, dimensions_count):
        argument_types.append("int32[:]")
    return argument_types, arguments, unpack
def vectorized_arguments(used_cols):
    """
    Build the combined argument signature for every used column.

    :param used_cols: ordered mapping of column name -> numba type string
    :return: (argument type strings, argument names, per-column unpack code)
    """
    all_types = []
    all_names = []
    all_unpacks = []
    for column_name, column_type in used_cols.items():
        col_types, col_names, col_unpack = arguments_for(column_name, column_type)
        all_types.extend(col_types)
        all_names.extend(col_names)
        all_unpacks.append(col_unpack)
    return all_types, all_names, all_unpacks
def compile_function(src, cols=None, globs=None, loc=None):
    """Compile expression ``src`` over columns ``cols`` into a callable.

    :return: dict with ``'result'``/``'columns'``/``'return_type'`` when
        numba folds the expression to a literal, otherwise ``'fun'`` (an
        invoker wrapping the compiled function), ``'columns'`` and
        ``'return_type'``.
    """
    colsDict = dict(cols)
    cols = colsDict.items()
    node = ast.parse("{}\n".format(src), '<string>', 'exec')
    analyze = Analyzer(colsDict, None)
    # visit the ast to determine which columns and references are actually used
    analyze.visit(node)
    # NOTE(review): ``fixed`` is module-global so the TypeError handler in
    # ``invoker`` below can include the generated source in its message.
    global fixed
    # Doing plain string manipulation, we need to add indent to the user-supplied source.
    expression = ''.join(' {}'.format(line) for line in src.splitlines(True))
    all_args = []
    used_cols = [name for name, type in cols if name in analyze.used_cols]
    all_args.extend(used_cols)
    # Wrap the user expression in a function taking the used columns as args.
    fixed = (
        "def __tmp__({}):\n"
        " return {}\n").format(",".join(all_args), expression)
    # exec the function-defining code above, saving it into `loc`
    if (loc == None):
        loc = {}
    exec (fixed, globs, loc)
    compiled = loc['__tmp__']
    # apply @vectorize to the result
    types_for_used_columns = [type for name, type in cols if name in analyze.used_cols]
    # numba's @vectorize cannot handle string- or array-typed columns.
    is_vectorize_friendly = not any(type == "unicode_type" or "[" in type for type in types_for_used_columns)
    expression = ''.join(' {}'.format(line) for line in src.splitlines(True))
    used_cols_name_to_type = OrderedDict([(name, type) for name, type in cols if name in analyze.used_cols])
    # Jit once with the column types to learn the expression's return type.
    return_type = numba.jit(["(" + ",".join(types_for_used_columns) + ",)"if len(
        analyze.used_cols) > 0 else ()], nopython=True)(compiled).nopython_signatures[0].return_type
    if isinstance(return_type, numba.types.scalars.Literal):
        # Constant expression: no function needed, return the folded value.
        return {'result': return_type.literal_value, 'columns': used_cols,
                'return_type': return_type.literal_type.name}
    if isinstance(return_type, numba.types.IterableType):
        is_vectorize_friendly = False
    if (is_vectorize_friendly):
        vectorized_function = vectorize(["(" + ",".join(types_for_used_columns) + ",)" if len(
            analyze.used_cols) > 0 else ()])(compiled)
        # debug: print("Signature ")
        # debug: print(list(result._dispatcher.overloads))
        return_type = [sig.return_type for sig in vectorized_function._dispatcher.overloads.keys()][0]
        if isinstance(return_type, numba.types.scalars.Literal):
            return {'result': return_type.literal_value, 'columns': used_cols,
                    'return_type': return_type.literal_type.name}
    if (len(analyze.used_cols) == 0):
        # No columns referenced: generate a loop filling an array with the
        # row-independent expression value, once per requested row.
        fixed = (
            "def __tmp__(__count__):\n"
            " __result__ = numpy.empty(__count__,dtype=numpy.dtype('{}'))\n"
            " for __i__ in range(__count__):\n"
            " __result__[__i__] = {}\n"
            " return __result__\n"
        ).format(return_type, src)
        globs["numpy"] = numpy
        exec (fixed, globs, loc)
        compiled = loc['__tmp__']
        vectorized_function = numba.jit(
            numba.typing.signature(nb.types.Array(return_type, 1, 'C'), (numba.types.int32)))(compiled)
    else:
        # Manual vectorization over flattened column vectors (see
        # vectorized_arguments for the data/offset argument layout).
        argument_types, arguments, unpack = vectorized_arguments(used_cols_name_to_type)
        is_primitive = return_type.name in numpy_primitives
        numpy_return_type_name = return_type.name
        if numpy_return_type_name == "bool":
            numpy_return_type_name = "bool_"
        # Primitive results fill a preallocated numpy array; others a list.
        element_add = "__dest__[__iterator_i__] = {}" if is_primitive else "__dest__.append({})"
        result_init = "__dest__ = {}\n".format("numpy.empty(__arg_count__,numpy.{})".format(numpy_return_type_name) if is_primitive else "[]")
        manually_vectorize_code = "def __tmp__(__arg_count__,{}):\n" \
                                  " {}\n" \
                                  " for __iterator_i__ in range(0,__arg_count__):\n" \
                                  " {}\n" \
                                  " {}\n" \
                                  " return __dest__".format(",".join(arguments), result_init, "\n ".join(unpack), element_add.format(expression))
        #debug print(manually_vectorize_code)
        exec (manually_vectorize_code, globs, loc)
        manually_vectorize = loc['__tmp__']
        # debug print(argument_types)
        argsSig, rt = numba.sigutils.normalize_signature("(int32," + ",".join(argument_types) + ",)" if len(
            argument_types) > 0 else "(int32,)")
        # Mark array arguments immutable so numba can optimise accordingly.
        for i in range(0, len(argsSig)):
            if isinstance(argsSig[i], numba.types.Array):
                argsSig[i].mutable = False
        vectorized_function = numba.jit([argsSig], nopython=True)(
            manually_vectorize)
    # wrap the returned function, so any expected errors come with a meaningful error message.
    def invoker(args):
        try:
            # debug: print("args:")
            # debug: print(len(args))
            # debug: print(list(args))
            args = list(args)
            # debug: print("done args")
            #print(vectorized_function.nopython_signatures[0])
            return vectorized_function.__call__(*args[1:]) if is_vectorize_friendly and len(args) > 1 else vectorized_function.__call__(*args)
        except TypeError:
            traceback.print_exc()
            # determine the types of referenced variables to help user identify their problem.
            types = '\n'.join("ndarray[{}]".format(ar.dtype.name) if
                              hasattr(ar, 'dtype') and isinstance(ar, ndarray)
                              else '{}({})'.format(ar.dtype.name, ar) if
                              hasattr(ar, 'dtype')
                              else "<foo>"
                              for ar in args)
            raise TypeError(("Vectorized functions may only reference primitives and other vectorized functions.\n"
                             "Argument types:\n"
                             "{}\n"
                             "Source:\n"
                             "{}").format(types, fixed))
    return {'fun': invoker, 'columns': used_cols, 'return_type': return_type.name}
| true | true |
f7332ae2eed05985ae6cecf9073d692e38ef04fa | 1,342 | py | Python | vmaig_blog/uwsgi-2.0.14/tests/queue.py | StanYaha/Blog | 3cb38918e14ebe6ce2e2952ef272de116849910d | [
"BSD-3-Clause"
] | 1 | 2018-11-24T16:10:49.000Z | 2018-11-24T16:10:49.000Z | vmaig_blog/uwsgi-2.0.14/tests/queue.py | StanYaha/Blog | 3cb38918e14ebe6ce2e2952ef272de116849910d | [
"BSD-3-Clause"
] | null | null | null | vmaig_blog/uwsgi-2.0.14/tests/queue.py | StanYaha/Blog | 3cb38918e14ebe6ce2e2952ef272de116849910d | [
"BSD-3-Clause"
] | null | null | null | # uwsgi --queue 10 --queue-store test.queue --master --module tests.queue --socket :3031
import uwsgi
import os
from flask import Flask,render_template,request,redirect,flash
app = Flask(__name__)
app.debug = True  # demo app: show tracebacks in the browser
app.secret_key = os.urandom(24)  # random per-process key used by flash()
@app.route('/')
def index():
    """Render the queue demo page with the uwsgi module in template scope."""
    return render_template('queue.html', uwsgi=uwsgi)
@app.route('/push', methods=['POST'])
def push_item():
    """Enqueue the POSTed ``body`` into the uwsgi queue."""
    pushed = uwsgi.queue_push(request.form['body'])
    if not pushed:
        flash('unable to enqueue item')
        return render_template('queue.html', uwsgi=uwsgi)
    flash('item enqueued')
    return redirect('/')
@app.route('/get', methods=['POST'])
def get_item():
    """Show the value stored at the POSTed queue ``slot``."""
    slot = request.form['slot']
    value = uwsgi.queue_get(int(slot))
    flash("slot %s value = %s" % (slot, value))
    return redirect('/')
@app.route('/pop', methods=['POST'])
def pop_item():
    """Pop a value off the uwsgi queue and report it via flash."""
    flash( "popped value = %s" % uwsgi.queue_pop() )
    return redirect('/')
@app.route('/pull', methods=['POST'])
def pull_item():
    """Pull a value from the uwsgi queue and report it via flash."""
    flash( "pulled value = %s" % uwsgi.queue_pull() )
    return redirect('/')
@app.route('/set', methods=['POST'])
def set_item():
    """Overwrite the queue slot given by ``pos`` with the POSTed ``body``."""
    ok = uwsgi.queue_set(int(request.form['pos']), request.form['body'])
    if ok:
        flash('item set')
        return redirect('/')
    flash('unable to set item')
    return render_template('queue.html', uwsgi=uwsgi)
| 27.387755 | 103 | 0.634128 |
import uwsgi
import os
from flask import Flask,render_template,request,redirect,flash
app = Flask(__name__)
app.debug = True  # demo app: show tracebacks in the browser
app.secret_key = os.urandom(24)  # random per-process key used by flash()
@app.route('/')
def index():
    """Render the queue demo page with the uwsgi module in template scope."""
    return render_template('queue.html', uwsgi=uwsgi)
@app.route('/push', methods=['POST'])
def push_item():
    """Enqueue the POSTed ``body`` into the uwsgi queue."""
    if uwsgi.queue_push(request.form['body']):
        flash('item enqueued')
        return redirect('/')
    else:
        flash('unable to enqueue item')
        return render_template('queue.html', uwsgi=uwsgi)
@app.route('/get', methods=['POST'])
def get_item():
    """Show the value stored at the POSTed queue ``slot``."""
    flash( "slot %s value = %s" % (request.form['slot'], uwsgi.queue_get( int(request.form['slot']) )))
    return redirect('/')
@app.route('/pop', methods=['POST'])
def pop_item():
    """Pop a value off the uwsgi queue and report it via flash."""
    flash( "popped value = %s" % uwsgi.queue_pop() )
    return redirect('/')
@app.route('/pull', methods=['POST'])
def pull_item():
    """Pull a value from the uwsgi queue and report it via flash."""
    flash( "pulled value = %s" % uwsgi.queue_pull() )
    return redirect('/')
@app.route('/set', methods=['POST'])
def set_item():
    """Overwrite the queue slot given by ``pos`` with the POSTed ``body``."""
    if uwsgi.queue_set(int(request.form['pos']), request.form['body']):
        flash('item set')
        return redirect('/')
    else:
        flash('unable to set item')
        return render_template('queue.html', uwsgi=uwsgi)
| true | true |
f7332b120a80430b2740b372b62f2bf1873a53c5 | 8,162 | py | Python | pasta/treeholder.py | koditaraszka/pasta | 2f14d5ae5b256ce1cd6bc49e44cc84a7b25be7f9 | [
"Python-2.0",
"BSD-Source-Code",
"OLDAP-2.7"
] | 1 | 2018-06-22T03:27:55.000Z | 2018-06-22T03:27:55.000Z | pasta/treeholder.py | kodicollins/pasta | 2f14d5ae5b256ce1cd6bc49e44cc84a7b25be7f9 | [
"Python-2.0",
"BSD-Source-Code",
"OLDAP-2.7"
] | null | null | null | pasta/treeholder.py | kodicollins/pasta | 2f14d5ae5b256ce1cd6bc49e44cc84a7b25be7f9 | [
"Python-2.0",
"BSD-Source-Code",
"OLDAP-2.7"
] | null | null | null | #!/usr/bin/env python
# This file is part of PASTA and is forked from SATe
# PASTA like SATe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Jiaye Yu and Mark Holder, University of Kansas
import dendropy
try:
from cStringIO import StringIO
except:
from io import StringIO
from pasta.tree import PhylogeneticTree
from pasta.errors import TaxaLabelsMismatchError
from pasta import get_logger
from dendropy import Tree, TreeList
_LOG = get_logger(__name__)
from dendropy.datamodel.treemodel import _convert_node_to_root_polytomy as convert_node_to_root_polytomy
# Module-level RNG used when resolve_polytomies is not given an explicit
# `rng`; only its sample() method is relied upon.
import random
POLYTOMY_RNG = random.Random()
def resolve_polytomies(tree, update_splits=False, rng=None):
    """
    Copied from more recent DendroPy than the version that we bundle...
    Arbitrarily resolve polytomies using 0-length splits.

    :param tree: dendropy tree, modified in place.
    :param update_splits: when True, recompute the tree's split encoding
        after the polytomies have been resolved.
    :param rng: object with a ``sample()`` method behaving like
        ``random.sample()``; used to pick attachment points so all
        resolutions are generated equiprobably.  Defaults to the
        module-level ``POLYTOMY_RNG``.
    """
    _LOG.debug("start resolving polytomies")
    from dendropy import Node
    polytomies = []
    if rng is None:
        rng = POLYTOMY_RNG
    # First pass: collect every node with more than two children.
    for node in tree.postorder_node_iter():
        if len(node.child_nodes()) > 2:
            polytomies.append(node)
    _LOG.debug("Found %d polytomies" %len(polytomies))
    for node in polytomies:
        children = node.child_nodes()
        nc = len(children)
        if nc > 2:
            # A trifurcation at the root is a valid unrooted-tree shape.
            if nc == 3 and node.parent_node is None:
                continue
            # Detach all but the first two children; each will be re-attached
            # beneath a randomly chosen attachment point.
            to_attach = children[2:]
            for child in to_attach:
                node.remove_child(child)
            attachment_points = children[:2] + [node]
            while len(to_attach) > 0:
                next_child = to_attach.pop()
                next_sib = rng.sample(attachment_points, 1)[0]
                # New internal node with a zero-length edge.
                next_attachment = Node()
                next_attachment.edge.length = 0.0
                p = next_sib.parent_node
                if p is None:
                    # next_sib is the polytomy node itself: push its current
                    # children down under the new attachment node.
                    c_list = list(next_sib.child_nodes())
                    next_sib.add_child(next_attachment)
                    next_sib.add_child(next_child)
                    for child in c_list:
                        next_sib.remove_child(child)
                        next_attachment.add_child(child)
                else:
                    # Splice the new attachment node between p and next_sib.
                    p.add_child(next_attachment)
                    p.remove_child(next_sib)
                    next_attachment.add_child(next_sib)
                    next_attachment.add_child(next_child)
                    # NOTE(review): only this branch grows attachment_points;
                    # confirm the parentless branch is meant to skip it.
                    attachment_points.append(next_attachment)
    _LOG.debug("polytomies resolution - updating splits")
    if update_splits:
        tree.update_splits()
    _LOG.debug("polytomies resolved.")
def check_taxon_labels(taxon_namespace, dataset):
    """Compare taxon labels in ``taxon_namespace`` against ``dataset``.

    :return: pair ``(extra, missing)`` -- labels present only in
        ``taxon_namespace``, and labels present only in the dataset's
        taxon namespaces, respectively.
    """
    dataset_labels = set()
    for tax_set in dataset.taxon_namespaces:
        dataset_labels.update(tax_set.labels())
    namespace_labels = set(taxon_namespace.labels())
    return namespace_labels - dataset_labels, dataset_labels - namespace_labels
def read_newick_with_translate(stream,taxon_namespace):
    """
    Instantiates and returns a `DataSet` object based on the
    NEWICK-formatted contents read from the file-like object source
    `stream`.

    NOTE(review): this function appears to be dead/broken code left over
    from an older DendroPy API: it references ``tree_source_iter`` and
    ``translate_dict``, neither of which is defined or imported anywhere in
    this module, so any call raises NameError.  The ``taxon_namespace``
    parameter is also never used.  Confirm there are no callers before
    removing or porting it to the current DendroPy API.
    """
    ts = [t for t in tree_source_iter(stream=stream,
            translate_dict=translate_dict,
            allow_repeated_use=True)]
    return ts[0]
def read_trees_into_dataset(dataset, tree_stream, starting_tree=False, preserve_underscores=True):
    """Read NEWICK trees from ``tree_stream`` into ``dataset``.

    When ``starting_tree`` is True, the trees are read against the dataset's
    first taxon namespace and the (single) starting tree is validated
    against it; any label mismatch raises TaxaLabelsMismatchError.
    Otherwise trees are read against the first taxon namespace when one
    exists, or into a fresh namespace when the dataset has none.

    :returns: the newly added tree list (``dataset.tree_lists[-1]``).
    :raises TaxaLabelsMismatchError: starting-tree taxa do not match the
        dataset's sequences.
    """
    if starting_tree:
        namespace = dataset.taxon_namespaces[0]
        try:
            dataset.read_from_stream(
                tree_stream, schema='NEWICK', taxon_namespace=namespace,
                preserve_underscores=preserve_underscores)
        except KeyError as e:
            # DendroPy reports unknown labels via KeyError; rephrase it.
            message = str(e)
            message = message[1:message.find("TaxonSet")] + "sequences but present in tree"
            raise TaxaLabelsMismatchError(
                'There are taxon label mismatches between the starting tree '
                'and sequences...\n'
                '%s\n' % message)
        starting = dataset.tree_lists[-1][0]
        if len(starting.leaf_nodes()) != len(namespace):
            tree_taxa = set(n.taxon for n in starting.leaf_nodes())
            absent = [t.label for t in set(namespace) - tree_taxa]
            raise TaxaLabelsMismatchError(
                'There are taxon label mismatches between the starting tree '
                'and sequences...\n'
                'In sequences, not tree: {0}\n'.format(','.join(absent)))
        _LOG.debug("reading tree finished")
    elif dataset.taxon_namespaces:
        dataset.read_from_stream(
            tree_stream, schema='NEWICK',
            taxon_namespace=dataset.taxon_namespaces[0])
    else:
        dataset.read_from_stream(tree_stream, schema='NEWICK')
    return dataset.tree_lists[-1]
def read_and_encode_splits(dataset, tree_stream, starting_tree=False):
    """Read one tree from ``tree_stream`` for the taxa in ``dataset`` and
    normalize it for split encoding.

    Bridges the dendropy 2 and 3 APIs: unifurcations are suppressed and the
    seed node is converted to a root polytomy.

    :returns: the single-tree list produced by ``read_trees_into_dataset``.
    """
    _LOG.debug("NOT covered in tests")
    tree_list = read_trees_into_dataset(
        dataset, tree_stream, starting_tree=starting_tree)
    assert len(tree_list) == 1
    tree = tree_list[0]
    tree.suppress_unifurcations()
    convert_node_to_root_polytomy(tree.seed_node)
    return tree_list
def generate_tree_with_splits_from_str(tree_str, dataset, force_fully_resolved=False):
    '''Build a PhylogeneticTree from the newick string ``tree_str`` using the
    taxa of ``dataset``, with ``calc_splits`` already applied.

    When ``force_fully_resolved`` is True, polytomies are arbitrarily
    resolved before splits are computed.
    '''
    _LOG.debug("start generating tree from string %s" % tree_str[0:200])
    tree_list = read_and_encode_splits(dataset, StringIO(tree_str))
    tree = tree_list[0]
    _LOG.debug("tree generated from string %s" % str(tree)[0:200])
    return generate_tree_with_splits_from_tree(tree, force_fully_resolved)
def generate_tree_with_splits_from_tree(t, force_fully_resolved=False):
    """Wrap the dendropy tree ``t`` in a PhylogeneticTree and compute splits.

    When ``force_fully_resolved`` is True the tree's polytomies are first
    arbitrarily resolved with zero-length branches.
    """
    if force_fully_resolved:
        resolve_polytomies(t, update_splits=False)
    wrapped = PhylogeneticTree(t)
    _LOG.debug("calculating splits")
    wrapped.calc_splits()
    _LOG.debug("end generating tree from string")
    return wrapped
class TreeHolder(object):
    '''Holds a tree bound to a fixed dataset.

    Exposes a ``tree_str`` property: reading it composes the current tree as
    newick (or None when no tree is set); assigning a newick string parses
    it against ``self.dataset`` (computing splits) and stores the result.
    '''

    def __init__(self, dataset, force_fully_resolved=False):
        self.dataset = dataset
        self.tree = None
        self._force_fully_resolved = force_fully_resolved

    def get_tree_str(self):
        '''Return the newick form of the held tree, or None when unset.'''
        if self.tree:
            return self.tree.compose_newick()
        return None

    def set_tree_str(self, tree_str):
        '''Parse ``tree_str`` (newick) against the dataset and store it.'''
        self.tree = generate_tree_with_splits_from_str(
            tree_str, self.dataset, self._force_fully_resolved)

    tree_str = property(get_tree_str, set_tree_str)

    def get_tree_copy(self):
        '''Return a deep copy of the held tree via a newick round-trip.'''
        return generate_tree_with_splits_from_str(self.tree_str, self.dataset)
| 40.405941 | 119 | 0.659397 |
import dendropy
try:
from cStringIO import StringIO
except:
from io import StringIO
from pasta.tree import PhylogeneticTree
from pasta.errors import TaxaLabelsMismatchError
from pasta import get_logger
from dendropy import Tree, TreeList
_LOG = get_logger(__name__)
from dendropy.datamodel.treemodel import _convert_node_to_root_polytomy as convert_node_to_root_polytomy
import random
POLYTOMY_RNG = random.Random()
def resolve_polytomies(tree, update_splits=False, rng=None):
_LOG.debug("start resolving polytomies")
from dendropy import Node
polytomies = []
if rng is None:
rng = POLYTOMY_RNG
for node in tree.postorder_node_iter():
if len(node.child_nodes()) > 2:
polytomies.append(node)
_LOG.debug("Found %d polytomies" %len(polytomies))
for node in polytomies:
children = node.child_nodes()
nc = len(children)
if nc > 2:
if nc == 3 and node.parent_node is None:
continue
to_attach = children[2:]
for child in to_attach:
node.remove_child(child)
attachment_points = children[:2] + [node]
while len(to_attach) > 0:
next_child = to_attach.pop()
next_sib = rng.sample(attachment_points, 1)[0]
next_attachment = Node()
next_attachment.edge.length = 0.0
p = next_sib.parent_node
if p is None:
c_list = list(next_sib.child_nodes())
next_sib.add_child(next_attachment)
next_sib.add_child(next_child)
for child in c_list:
next_sib.remove_child(child)
next_attachment.add_child(child)
else:
p.add_child(next_attachment)
p.remove_child(next_sib)
next_attachment.add_child(next_sib)
next_attachment.add_child(next_child)
attachment_points.append(next_attachment)
_LOG.debug("polytomies resolution - updating splits")
if update_splits:
tree.update_splits()
_LOG.debug("polytomies resolved.")
def check_taxon_labels(taxon_namespace, dataset):
ts = set([i for tset in dataset.taxon_namespaces for i in tset.labels()])
ds = set(taxon_namespace.labels())
extra = ds - ts
missing = ts - ds
return extra, missing
def read_newick_with_translate(stream,taxon_namespace):
ts = [t for t in tree_source_iter(stream=stream,
translate_dict=translate_dict,
allow_repeated_use=True)]
return ts[0]
def read_trees_into_dataset(dataset, tree_stream, starting_tree=False, preserve_underscores=True):
if starting_tree:
try:
dataset.read_from_stream(tree_stream,
schema='NEWICK', taxon_namespace=dataset.taxon_namespaces[0],preserve_underscores=preserve_underscores)
except KeyError as e:
m = str(e)
m = m[1:m.find("TaxonSet")] + "sequences but present in tree"
raise TaxaLabelsMismatchError(
'There are taxon label mismatches between the starting tree '
'and sequences...\n'
'%s\n' %m)
st = dataset.tree_lists[-1][0]
if len(st.leaf_nodes()) != len(dataset.taxon_namespaces[0]):
missing = [t.label for t in set(dataset.taxon_namespaces[0]) - set((n.taxon for n in st.leaf_nodes()))]
raise TaxaLabelsMismatchError(
'There are taxon label mismatches between the starting tree '
'and sequences...\n'
'In sequences, not tree: {0}\n'.format(','.join(missing)) )
_LOG.debug("reading tree finished")
elif dataset.taxon_namespaces:
dataset.read_from_stream(tree_stream, schema='NEWICK', taxon_namespace=dataset.taxon_namespaces[0])
else:
dataset.read_from_stream(tree_stream, schema='NEWICK')
return dataset.tree_lists[-1]
def read_and_encode_splits(dataset, tree_stream, starting_tree=False):
_LOG.debug("NOT covered in tests")
tree_list = read_trees_into_dataset(dataset, tree_stream,
starting_tree=starting_tree)
assert len(tree_list) == 1
tree_list[0].suppress_unifurcations()
convert_node_to_root_polytomy(tree_list[0].seed_node)
return tree_list
def generate_tree_with_splits_from_str(tree_str, dataset, force_fully_resolved=False):
_LOG.debug("start generating tree from string %s" %tree_str[0:200])
tree_stream = StringIO(tree_str)
tree_list = read_and_encode_splits(dataset, tree_stream)
t = tree_list[0]
_LOG.debug("tree generated from string %s" %str(t)[0:200])
return generate_tree_with_splits_from_tree(t, force_fully_resolved)
def generate_tree_with_splits_from_tree(t, force_fully_resolved=False):
if force_fully_resolved:
resolve_polytomies(t, update_splits=False)
t = PhylogeneticTree(t)
_LOG.debug("calculating splits")
t.calc_splits()
_LOG.debug("end generating tree from string")
return t
class TreeHolder(object):
def __init__(self, dataset, force_fully_resolved=False):
self.dataset = dataset
self.tree = None
self._force_fully_resolved=force_fully_resolved
def get_tree_str(self):
return self.tree.compose_newick() if self.tree else None
def set_tree_str(self, tree_str):
self.tree = generate_tree_with_splits_from_str(tree_str,
self.dataset,
self._force_fully_resolved)
tree_str = property(get_tree_str, set_tree_str)
def get_tree_copy(self):
return generate_tree_with_splits_from_str(self.tree_str, self.dataset)
| true | true |
f7332bc723643ecab2695e061bcbc427a2a2cf7d | 7,532 | py | Python | TA-linode/bin/ta_linode/aob_py3/solnlib/splunkenv.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 11 | 2020-01-23T11:32:26.000Z | 2021-09-23T09:24:02.000Z | TA-linode/bin/ta_linode/aob_py3/solnlib/splunkenv.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 26 | 2019-07-15T02:38:22.000Z | 2021-12-01T04:14:17.000Z | TA-linode/bin/ta_linode/aob_py3/solnlib/splunkenv.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 6 | 2019-07-14T17:44:06.000Z | 2020-11-17T17:33:23.000Z | # Copyright 2016 Splunk, Inc.
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
"""
Splunk platform related utilities.
"""
import os
import os.path as op
import subprocess
import socket
try:
from ConfigParser import ConfigParser
CONF_PARSER_KWARGS = {}
except ImportError:
from configparser import ConfigParser
CONF_PARSER_KWARGS = {"strict": False}
from io import StringIO
from . import utils
__all__ = [
"make_splunkhome_path",
"get_splunk_host_info",
"get_splunk_bin",
"get_splunkd_access_info",
"get_splunkd_uri",
"get_conf_key_value",
"get_conf_stanza",
"get_conf_stanzas",
]
ETC_LEAF = "etc"
# See validateSearchHeadPooling() in src/libbundle/ConfSettings.cpp
on_shared_storage = [
os.path.join(ETC_LEAF, "apps"),
os.path.join(ETC_LEAF, "users"),
os.path.join("var", "run", "splunk", "dispatch"),
os.path.join("var", "run", "splunk", "srtemp"),
os.path.join("var", "run", "splunk", "rss"),
os.path.join("var", "run", "splunk", "scheduler"),
os.path.join("var", "run", "splunk", "lookup_tmp"),
]
def _splunk_home():
return os.path.normpath(os.environ["SPLUNK_HOME"])
def _splunk_etc():
try:
result = os.environ["SPLUNK_ETC"]
except KeyError:
result = op.join(_splunk_home(), ETC_LEAF)
return os.path.normpath(result)
def _get_shared_storage():
    """Get splunk shared storage name.

    :returns: Shared storage path when search-head pooling is enabled,
        otherwise None.
    :rtype: ``string`` or ``None``
    """
    try:
        pooling_state = get_conf_key_value("server", "pooling", "state")
        pooling_storage = get_conf_key_value("server", "pooling", "storage")
    except KeyError:
        # No [pooling] stanza (or keys) configured: pooling is off.
        return None
    if pooling_state == "enabled" and pooling_storage:
        return pooling_storage
    return None
# Verify path prefix and return true if both paths have drives
def _verify_path_prefix(path, start):
path_drive = os.path.splitdrive(path)[0]
start_drive = os.path.splitdrive(start)[0]
return len(path_drive) == len(start_drive)
def make_splunkhome_path(parts):
    """Construct an absolute path under $SPLUNK_HOME from ``parts``.

    For example, parts ['etc', 'apps', 'Splunk_TA_test'] yields
    $SPLUNK_HOME/etc/apps/Splunk_TA_test.  Paths under ``etc`` are
    redirected to $SPLUNK_ETC, and paths living on search-head-pooling
    shared storage are rooted there instead.

    Note: this function assumes SPLUNK_HOME is in the environment.

    :param parts: Path parts.
    :type parts: ``list, tuple``
    :returns: Absolute path.
    :rtype: ``string``
    :raises ValueError: Escape from intended parent directories.
    """
    relative = os.path.normpath(os.path.join(*parts))

    base = None
    shared = _get_shared_storage()
    if shared:
        for candidate in on_shared_storage:
            # SPL-100508: on Windows os.path.relpath fails when exactly one
            # of the two paths is missing a drive letter, so skip the check.
            if os.name == "nt" and not _verify_path_prefix(relative, candidate):
                break
            if not os.path.relpath(relative, candidate).startswith(".."):
                base = shared
                break

    if base is None:
        etc_with_sep = os.path.join(ETC_LEAF, "")
        if relative == ETC_LEAF or relative.startswith(etc_with_sep):
            # $SPLUNK_HOME/etc is redirected to $SPLUNK_ETC.  Strip the
            # leading "etc" plus separator; for exactly "etc" the remainder
            # becomes the empty string.
            base = _splunk_etc()
            relative = relative[len(ETC_LEAF) + 1:]
        else:
            base = _splunk_home()

    full = os.path.normpath(os.path.join(base, relative))

    # Refuse any result that escapes the chosen parent directory.
    if os.path.relpath(full, base).startswith(".."):
        raise ValueError(
            'Illegal escape from parent directory "%s": %s' % (base, full)
        )
    return full
def get_splunk_host_info():
    """Get splunk host info.

    :returns: Tuple of (server_name, host_name).
    :rtype: ``tuple``
    """
    return (
        get_conf_key_value("server", "general", "serverName"),
        socket.gethostname(),
    )
def get_splunk_bin():
    """Get absolute path of splunk CLI.

    :returns: absolute path of the splunk CLI executable
    :rtype: ``string``
    """
    executable = "splunk.exe" if os.name == "nt" else "splunk"
    return make_splunkhome_path(("bin", executable))
def get_splunkd_access_info():
    """Get splunkd server access info.

    :returns: Tuple of (scheme, host, port).
    :rtype: ``tuple``
    """
    use_ssl = utils.is_true(
        get_conf_key_value("server", "sslConfig", "enableSplunkdSSL"))
    scheme = "https" if use_ssl else "http"

    pieces = get_conf_key_value("web", "settings", "mgmtHostPort").strip().split(":")
    host = pieces[0]
    port = int(pieces[1])

    # SPLUNK_BINDIP overrides the configured host; it may itself carry a
    # ":port" suffix, which is dropped in favor of mgmtHostPort's port.
    if "SPLUNK_BINDIP" in os.environ:
        bindip = os.environ["SPLUNK_BINDIP"]
        sep_idx = bindip.rfind(":")
        host = bindip[:sep_idx] if sep_idx > 0 else bindip
    return (scheme, host, port)
def get_splunkd_uri():
    """Get splunkd uri.

    :returns: Splunkd uri, e.g. https://127.0.0.1:8089.
    :rtype: ``string``
    """
    uri = os.environ.get("SPLUNKD_URI")
    if uri:
        return uri
    scheme, host, port = get_splunkd_access_info()
    return "{}://{}:{}".format(scheme, host, port)
def get_conf_key_value(conf_name, stanza, key):
    """Get value of `key` of `stanza` in `conf_name`.

    :param conf_name: Config file name (".conf" suffix optional).
    :type conf_name: ``string``
    :param stanza: Stanza name.
    :type stanza: ``string``
    :param key: Key name.
    :type key: ``string``
    :returns: Config value.
    :rtype: ``(string, list, dict)``
    :raises KeyError: If `stanza` or `key` doesn't exist.
    """
    return get_conf_stanzas(conf_name)[stanza][key]
def get_conf_stanza(conf_name, stanza):
    """Get `stanza` in `conf_name`.

    :param conf_name: Config file name (".conf" suffix optional).
    :type conf_name: ``string``
    :param stanza: Stanza name.
    :type stanza: ``string``
    :returns: Mapping of keys to values for the stanza.
    :rtype: ``dict``
    :raises KeyError: If stanza doesn't exist.
    """
    return get_conf_stanzas(conf_name)[stanza]
def get_conf_stanzas(conf_name):
    """Get stanzas of `conf_name` by shelling out to ``splunk cmd btool``.

    :param conf_name: Config file name (".conf" suffix optional).
    :type conf_name: ``string``
    :returns: Mapping of stanza name to a dict of its key/value pairs.
    :rtype: ``dict``

    Usage::

       >>> stanzas = get_conf_stanzas('server')
       >>> return: {'serverName': 'testServer', 'sessionTimeout': '1h', ...}
    """
    if conf_name.endswith(".conf"):
        conf_name = conf_name[:-5]

    # TODO: dynamically calculate SPLUNK_HOME
    btool_cli = [
        op.join(os.environ["SPLUNK_HOME"], "bin", "splunk"),
        "cmd",
        "btool",
        conf_name,
        "list",
    ]

    p = subprocess.Popen(btool_cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = p.communicate()
    if isinstance(out, bytes):
        out = out.decode()

    parser = ConfigParser(**CONF_PARSER_KWARGS)
    # Keep option names case-sensitive (ConfigParser lowercases by default).
    parser.optionxform = str
    # ConfigParser.readfp() was deprecated in Python 3.2 and removed in
    # Python 3.12; prefer read_file() when available.  Python 2's
    # ConfigParser only provides readfp(), hence the fallback.
    if hasattr(parser, "read_file"):
        parser.read_file(StringIO(out))
    else:
        parser.readfp(StringIO(out))

    out = {}
    for section in parser.sections():
        out[section] = {item[0]: item[1] for item in parser.items(section, raw=True)}
    return out
| 26.42807 | 85 | 0.635024 |
import os
import os.path as op
import subprocess
import socket
try:
from ConfigParser import ConfigParser
CONF_PARSER_KWARGS = {}
except ImportError:
from configparser import ConfigParser
CONF_PARSER_KWARGS = {"strict": False}
from io import StringIO
from . import utils
__all__ = [
"make_splunkhome_path",
"get_splunk_host_info",
"get_splunk_bin",
"get_splunkd_access_info",
"get_splunkd_uri",
"get_conf_key_value",
"get_conf_stanza",
"get_conf_stanzas",
]
ETC_LEAF = "etc"
on_shared_storage = [
os.path.join(ETC_LEAF, "apps"),
os.path.join(ETC_LEAF, "users"),
os.path.join("var", "run", "splunk", "dispatch"),
os.path.join("var", "run", "splunk", "srtemp"),
os.path.join("var", "run", "splunk", "rss"),
os.path.join("var", "run", "splunk", "scheduler"),
os.path.join("var", "run", "splunk", "lookup_tmp"),
]
def _splunk_home():
return os.path.normpath(os.environ["SPLUNK_HOME"])
def _splunk_etc():
try:
result = os.environ["SPLUNK_ETC"]
except KeyError:
result = op.join(_splunk_home(), ETC_LEAF)
return os.path.normpath(result)
def _get_shared_storage():
try:
state = get_conf_key_value("server", "pooling", "state")
storage = get_conf_key_value("server", "pooling", "storage")
except KeyError:
state = "disabled"
storage = None
if state == "enabled" and storage:
return storage
return None
def _verify_path_prefix(path, start):
path_drive = os.path.splitdrive(path)[0]
start_drive = os.path.splitdrive(start)[0]
return len(path_drive) == len(start_drive)
def make_splunkhome_path(parts):
relpath = os.path.normpath(os.path.join(*parts))
basepath = None
shared_storage = _get_shared_storage()
if shared_storage:
for candidate in on_shared_storage:
if os.name == "nt" and not _verify_path_prefix(relpath, candidate):
break
if os.path.relpath(relpath, candidate)[0:2] != "..":
basepath = shared_storage
break
if basepath is None:
etc_with_trailing_sep = os.path.join(ETC_LEAF, "")
if relpath == ETC_LEAF or relpath.startswith(etc_with_trailing_sep):
basepath = _splunk_etc()
relpath = relpath[4:]
else:
basepath = _splunk_home()
fullpath = os.path.normpath(os.path.join(basepath, relpath))
if os.path.relpath(fullpath, basepath)[0:2] == "..":
raise ValueError(
'Illegal escape from parent directory "%s": %s' % (basepath, fullpath)
)
return fullpath
def get_splunk_host_info():
server_name = get_conf_key_value("server", "general", "serverName")
host_name = socket.gethostname()
return (server_name, host_name)
def get_splunk_bin():
if os.name == "nt":
splunk_bin = "splunk.exe"
else:
splunk_bin = "splunk"
return make_splunkhome_path(("bin", splunk_bin))
def get_splunkd_access_info():
if utils.is_true(get_conf_key_value("server", "sslConfig", "enableSplunkdSSL")):
scheme = "https"
else:
scheme = "http"
host_port = get_conf_key_value("web", "settings", "mgmtHostPort")
host_port = host_port.strip()
host = host_port.split(":")[0]
port = int(host_port.split(":")[1])
if "SPLUNK_BINDIP" in os.environ:
bindip = os.environ["SPLUNK_BINDIP"]
port_idx = bindip.rfind(":")
host = bindip[:port_idx] if port_idx > 0 else bindip
return (scheme, host, port)
def get_splunkd_uri():
if os.environ.get("SPLUNKD_URI"):
return os.environ["SPLUNKD_URI"]
scheme, host, port = get_splunkd_access_info()
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host, port=port)
def get_conf_key_value(conf_name, stanza, key):
stanzas = get_conf_stanzas(conf_name)
return stanzas[stanza][key]
def get_conf_stanza(conf_name, stanza):
stanzas = get_conf_stanzas(conf_name)
return stanzas[stanza]
def get_conf_stanzas(conf_name):
if conf_name.endswith(".conf"):
conf_name = conf_name[:-5]
# TODO: dynamically caculate SPLUNK_HOME
btool_cli = [
op.join(os.environ["SPLUNK_HOME"], "bin", "splunk"),
"cmd",
"btool",
conf_name,
"list",
]
p = subprocess.Popen(btool_cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
if isinstance(out, bytes):
out = out.decode()
parser = ConfigParser(**CONF_PARSER_KWARGS)
parser.optionxform = str
parser.readfp(StringIO(out))
out = {}
for section in parser.sections():
out[section] = {item[0]: item[1] for item in parser.items(section, raw=True)}
return out
| true | true |
f7332c7bdb8b56b19acfa24147f6f7e92a2f35cf | 1,187 | py | Python | django/duck/media.py | lastcoolnameleft/duckiehunt | 4a00c12aac934fc9492ebd8563a36b4e76eab671 | [
"MIT"
] | 1 | 2018-04-16T04:03:13.000Z | 2018-04-16T04:03:13.000Z | django/duck/media.py | lastcoolnameleft/duckiehunt | 4a00c12aac934fc9492ebd8563a36b4e76eab671 | [
"MIT"
] | 38 | 2017-07-31T01:02:10.000Z | 2022-02-10T07:45:55.000Z | django/duck/media.py | lastcoolnameleft/duckiehunt | 4a00c12aac934fc9492ebd8563a36b4e76eab671 | [
"MIT"
] | null | null | null | """ Helper functions for uploading image to Flickr """
import flickr_api
from django.conf import settings
from django.core.files.storage import FileSystemStorage
def handle_uploaded_file(uploaded_file, duck_id, duck_name, comments):
    """Persist an uploaded duck-sighting photo and push it to Flickr.

    Returns the Flickr photo info dict (including available sizes).
    """
    title = 'Duck #{0} ({1})'.format(duck_id, duck_name)
    file_path = write_upload_to_file(uploaded_file, settings.UPLOAD_PATH)
    return upload_to_flickr(
        file_path, title, comments, settings.FLICKR_PHOTO_IS_PUBLIC, "duckiehunt")
def write_upload_to_file(photo_file, upload_path):
    """Persist an in-memory uploaded file under ``upload_path``.

    Returns the URL of the stored file as reported by Django's default
    FileSystemStorage.
    """
    storage = FileSystemStorage()
    saved_name = storage.save(upload_path + photo_file.name, photo_file)
    return storage.url(saved_name)
def upload_to_flickr(photo_file, title, comments, is_public, tags):
    """Upload ``photo_file`` to Flickr and return its info dict with sizes."""
    photo = flickr_api.upload(
        photo_file=photo_file,
        title=title,
        is_public=is_public,
        tags=tags,
        description=comments,
    )
    info = photo.getInfo()
    info['sizes'] = photo.getSizes()
    return info
| 40.931034 | 100 | 0.723673 | import flickr_api
from django.conf import settings
from django.core.files.storage import FileSystemStorage
def handle_uploaded_file(uploaded_file, duck_id, duck_name, comments):
title = 'Duck #' + str(duck_id) + ' (' + duck_name + ')'
tags = "duckiehunt"
file_path = write_upload_to_file(uploaded_file, settings.UPLOAD_PATH)
photo_info = upload_to_flickr(file_path, title, comments, settings.FLICKR_PHOTO_IS_PUBLIC, tags)
return photo_info
def write_upload_to_file(photo_file, upload_path):
fss = FileSystemStorage()
filename = fss.save(upload_path + photo_file.name, photo_file)
uploaded_file_url = fss.url(filename)
return uploaded_file_url
def upload_to_flickr(photo_file, title, comments, is_public, tags):
photo = flickr_api.upload(photo_file=photo_file, title=title, is_public=is_public,
tags=tags, description=comments)
photo_info = photo.getInfo()
photo_info['sizes'] = photo.getSizes()
return photo_info
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.