hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fc4340d2b6a8628244829406520e082b98cff1 | 3,024 | py | Python | polaris2/geomvis/R2toR.py | talonchandler/polaris2 | 2ec215edf7f63967af109661d40bc55b10d836da | [
"MIT"
] | null | null | null | polaris2/geomvis/R2toR.py | talonchandler/polaris2 | 2ec215edf7f63967af109661d40bc55b10d836da | [
"MIT"
] | null | null | null | polaris2/geomvis/R2toR.py | talonchandler/polaris2 | 2ec215edf7f63967af109661d40bc55b10d836da | [
"MIT"
] | null | null | null | import tifffile
import numpy as np
from polaris2.geomvis import utilmpl
import logging
log = logging.getLogger('log')
class xy:
    """Visualizer for a scalar function sampled on a 2D (R^2 -> R) grid.

    Wraps a 2D array and offers TIFF export plus a matplotlib rendering
    with a colorbar.  ``cmap`` may be 'gray' (non-negative data, limits
    [0, max]) or 'bwr' (signed data, symmetric limits).
    """
    def __init__(self, data, px_dims=None, cmap='gray', title='',
                 fov=None, plotfov=None, vmin=None, vmax=None):
        # Per-call defaults instead of mutable default arguments, which
        # would be shared between instances.
        if px_dims is None:
            px_dims = [1, 1]
        if fov is None:
            fov = [0, 1]
        if plotfov is None:
            plotfov = [0, 1]
        self.data = data
        self.px_dims = px_dims
        self.cmap = cmap
        # Raw string: '\m' is not a valid escape; keep the literal TeX macro.
        self.xlabel = '{:.0f}'.format(plotfov[1] - plotfov[0]) + r' $\mu$m'
        self.title = title
        self.fov = fov
        self.plotfov = plotfov
        self.vmin = vmin
        self.vmax = vmax

    def to_tiff(self, filename):
        """Write ``data`` to an ImageJ-compatible TIFF at ``filename``."""
        utilmpl.mkdir(filename)
        with tifffile.TiffWriter(filename, imagej=True) as tif:
            tif.save(self.data.astype(np.float32),
                     resolution=(1/self.px_dims[0], 1/self.px_dims[1]),
                     metadata={'unit':'um'}) # TZCYXS

    def plot(self, f, fc, ss):
        """Draw the image and its colorbar onto figure ``f``.

        ``f``, ``fc``, ``ss`` are passed through from the plotting
        framework (see utilmpl.plot_template).
        """
        ax = utilmpl.plot_template(f, fc, shape=self.data.shape, xlabel=self.xlabel,
                                   title=self.title)

        # Color limits derived from the data.  Bug fix: string comparison
        # must use '==' -- the original 'is' compared object identity and
        # only worked through CPython string interning.
        vmin = vmax = None
        if self.cmap == 'gray':
            vmax = np.max(self.data)
            vmin = 0
        elif self.cmap == 'bwr':
            vmax = np.max(np.abs(self.data))
            vmin = -vmax

        # Explicit limits passed at construction override the data-derived ones.
        if self.vmax is not None:
            vmax = self.vmax
            vmin = self.vmin

        ax[0].set_xlim(self.plotfov)
        ax[0].set_ylim(self.plotfov)
        ax[0].imshow(self.data.T, vmin=vmin, vmax=vmax, cmap=self.cmap,
                     extent=2*self.fov,
                     aspect='auto', interpolation='nearest', origin='lower')

        # Colorbar built from a smooth vertical gradient.
        x = np.linspace(vmin, vmax, 100)
        xx, yy = np.meshgrid(x, x)
        ax[1].imshow(yy, vmin=vmin, vmax=vmax, cmap=self.cmap,
                     extent=[0,1,vmin,vmax], aspect='auto',
                     interpolation='bicubic', origin='lower')
        if self.cmap == 'gray':
            ax[1].annotate('{:.2g}'.format(vmax), xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].annotate('0', xy=(0,0), xytext=(1.8, 0), textcoords='axes fraction', va='center', ha='left')
            ax[1].yaxis.set_ticks([0, vmax])
            ax[1].set_yticklabels(['', ''])
        elif self.cmap == 'bwr':
            ax[1].annotate('{:.2g}'.format(vmax), xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].annotate('${:.2g}$'.format(vmin), xy=(0,0), xytext=(0, -0.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].yaxis.set_ticks([vmin, 0, vmax])
            ax[1].set_yticklabels(['', '', ''])

        # Axis-orientation markers: red = x, green = y.
        ax[0].annotate('', xy=(0,0), xytext=(0.1, 0), xycoords='axes fraction', textcoords='axes fraction', arrowprops=dict(arrowstyle="-", lw=2, shrinkB=0, color='red'))
        ax[0].annotate('', xy=(0,0), xytext=(0, 0.1), xycoords='axes fraction', textcoords='axes fraction', arrowprops=dict(arrowstyle="-", lw=2, shrinkB=0, color=[0,1,0]))
| 40.32 | 172 | 0.538029 | import tifffile
import numpy as np
from polaris2.geomvis import utilmpl
import logging
log = logging.getLogger('log')
class xy:
    """Visualizer for a scalar function sampled on a 2D grid.

    ``cmap`` may be 'gray' (limits [0, max]) or 'bwr' (symmetric limits).
    """
    def __init__(self, data, px_dims=None, cmap='gray', title='',
                 fov=None, plotfov=None, vmin=None, vmax=None):
        # Per-call defaults instead of mutable default arguments.
        if px_dims is None:
            px_dims = [1, 1]
        if fov is None:
            fov = [0, 1]
        if plotfov is None:
            plotfov = [0, 1]
        self.data = data
        self.px_dims = px_dims
        self.cmap = cmap
        # Raw string keeps the literal TeX macro '\mu'.
        self.xlabel = '{:.0f}'.format(plotfov[1] - plotfov[0]) + r' $\mu$m'
        self.title = title
        self.fov = fov
        self.plotfov = plotfov
        self.vmin = vmin
        self.vmax = vmax
    def to_tiff(self, filename):
        """Write ``data`` to an ImageJ-compatible TIFF at ``filename``."""
        utilmpl.mkdir(filename)
        with tifffile.TiffWriter(filename, imagej=True) as tif:
            tif.save(self.data.astype(np.float32),
                     resolution=(1/self.px_dims[0], 1/self.px_dims[1]),
                     metadata={'unit':'um'})
    def plot(self, f, fc, ss):
        """Draw the image and its colorbar onto figure ``f``."""
        ax = utilmpl.plot_template(f, fc, shape=self.data.shape, xlabel=self.xlabel,
                                   title=self.title)
        # Bug fix: compare strings with '==', not identity ('is').
        vmin = vmax = None
        if self.cmap == 'gray':
            vmax = np.max(self.data)
            vmin = 0
        elif self.cmap == 'bwr':
            vmax = np.max(np.abs(self.data))
            vmin = -vmax
        # Explicit construction-time limits win over data-derived ones.
        if self.vmax is not None:
            vmax = self.vmax
            vmin = self.vmin
        ax[0].set_xlim(self.plotfov)
        ax[0].set_ylim(self.plotfov)
        ax[0].imshow(self.data.T, vmin=vmin, vmax=vmax, cmap=self.cmap,
                     extent=2*self.fov,
                     aspect='auto', interpolation='nearest', origin='lower')
        x = np.linspace(vmin, vmax, 100)
        xx, yy = np.meshgrid(x, x)
        ax[1].imshow(yy, vmin=vmin, vmax=vmax, cmap=self.cmap,
                     extent=[0,1,vmin,vmax], aspect='auto',
                     interpolation='bicubic', origin='lower')
        if self.cmap == 'gray':
            ax[1].annotate('{:.2g}'.format(vmax), xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].annotate('0', xy=(0,0), xytext=(1.8, 0), textcoords='axes fraction', va='center', ha='left')
            ax[1].yaxis.set_ticks([0, vmax])
            ax[1].set_yticklabels(['', ''])
        elif self.cmap == 'bwr':
            ax[1].annotate('{:.2g}'.format(vmax), xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].annotate('${:.2g}$'.format(vmin), xy=(0,0), xytext=(0, -0.05), textcoords='axes fraction', va='center', ha='left')
            ax[1].yaxis.set_ticks([vmin, 0, vmax])
            ax[1].set_yticklabels(['', '', ''])
        # Axis-orientation markers: red = x, green = y.
        ax[0].annotate('', xy=(0,0), xytext=(0.1, 0), xycoords='axes fraction', textcoords='axes fraction', arrowprops=dict(arrowstyle="-", lw=2, shrinkB=0, color='red'))
        ax[0].annotate('', xy=(0,0), xytext=(0, 0.1), xycoords='axes fraction', textcoords='axes fraction', arrowprops=dict(arrowstyle="-", lw=2, shrinkB=0, color=[0,1,0]))
| true | true |
f7fc43832095bd13bf18e789848e36fef5a4bd7a | 3,596 | py | Python | tests/unit/test_manager.py | rbaltrusch/bach_generator | a5de2d55c982b94d22c62d2cbc8adecd25456069 | [
"MIT"
] | null | null | null | tests/unit/test_manager.py | rbaltrusch/bach_generator | a5de2d55c982b94d22c62d2cbc8adecd25456069 | [
"MIT"
] | null | null | null | tests/unit/test_manager.py | rbaltrusch/bach_generator | a5de2d55c982b94d22c62d2cbc8adecd25456069 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for the manager module"""
import random
from typing import List
import pytest
from bach_generator.src import manager, model
# pylint: disable=protected-access
class MockModel:
    """Model double: records jumble() calls and averages on compute()."""

    def __init__(self, inputs: int = 2):
        self.inputs = inputs
        self.jumbled_with = None

    def jumble(self, jumble_strategy, weight_divergence):
        """Remember the arguments of the most recent jumble call."""
        self.jumbled_with = (jumble_strategy, weight_divergence)

    @staticmethod
    def compute(inputs: List[int]) -> float:
        """Return a one-element list holding the arithmetic mean of inputs."""
        average = sum(inputs) / len(inputs)
        return [average]
class MockDecoder:
    """Decoder double that truncates each float toward zero."""

    @staticmethod
    def decode(inputs: List[float]) -> List[int]:
        return [int(value) for value in inputs]
class MockJudge:
    """Judge double: score is sum(inputs) / sum(outputs), guarding div-by-0."""

    @staticmethod
    def rate(inputs: List[int], outputs: List[int]) -> float:
        denominator = sum(outputs) or 1  # fall back to 1 when outputs sum to 0
        return sum(inputs) / denominator
class MockQuantizer:
    """Quantizer double that truncates each float toward zero."""

    @staticmethod
    def quantize(inputs: List[float]) -> List[int]:
        return [int(value) for value in inputs]
@pytest.mark.parametrize(
    "inputs, outputs, layers, layer_size",
    [
        [0, 0, 0, 0],
        [5, 1, 1, 20],
        random.choices(list(range(20)), k=4),
    ],
)
def test_model_manager_init(inputs, outputs, layers, layer_size):
    """ModelManager builds a model with input, hidden and output layers."""
    manager_ = manager.ModelManager(inputs, outputs, layers, layer_size)
    assert manager_.model is not None
    assert len(manager_.model._layers) == layers + 2  # inputs and outputs extra
    assert len(manager_.model._layers[0].nodes) == inputs
    assert len(manager_.model._layers[-1].nodes) == outputs
    # every hidden layer shares the configured layer_size
    for layer in manager_.model._layers[1:-1]:
        assert len(layer.nodes) == layer_size
@pytest.mark.usefixtures("test_model")
def test_model_manager_construct_from_model(test_model):
    """construct_with_model wraps an existing model without copying it."""
    manager_ = manager.ModelManager.construct_with_model(model=test_model)
    assert manager_.model is test_model
@pytest.mark.parametrize(
    "inputs, expected_encoded_outputs",
    [
        ([], []),
        ([0], [0]),
        ([0, 1, 5, 3], [0, 0, 3, 4]),
    ],
)
def test_model_manager_run(inputs, expected_encoded_outputs):
    """run_model stores one quantized model output per input value."""
    manager_ = manager.ModelManager.construct_with_model(model=MockModel())
    manager_.run_model(inputs=inputs, quantizer=MockQuantizer())
    assert manager_.encoded_outputs == expected_encoded_outputs
@pytest.mark.parametrize(
    "jumble_strategy, weight_divergence",
    [
        (model.jumble_by_factor_strategy, 0),
        (model.jumble_by_selection_strategy, -2),
    ],
)
def test_model_manager_clone(jumble_strategy, weight_divergence):
    """Smoke test: clone accepts each jumble strategy without raising."""
    manager_ = manager.ModelManager.construct_with_model(model=MockModel())
    # no assertion -- the test only checks that clone completes
    manager_.clone(jumble_strategy, weight_divergence)
@pytest.mark.parametrize(
    "encoded_outputs, expected_decoded_outputs",
    [
        ([], []),
        ([0.1], [0]),
        ([0.7, 1.1, 4.6, -1.2], [0, 1, 4, -1]),
    ],
)
def test_model_manager_decode(encoded_outputs, expected_decoded_outputs):
    """decode_outputs runs the stored encoded outputs through the decoder."""
    manager_ = manager.ModelManager(inputs=1, outputs=1, layers=1, layer_size=1)
    manager_.encoded_outputs = encoded_outputs
    manager_.decode_outputs(decoder=MockDecoder())
    assert manager_.decoded_outputs == expected_decoded_outputs
@pytest.mark.parametrize(
    "inputs, outputs, expected_rating",
    [
        ([], [], 0),
        ([1], [2], 0.5),
        ([1, 2, 3], [0, 5, -2], 2),
    ],
)
def test_model_manager_rating(inputs, outputs, expected_rating):
    """get_rated_by stores the judge's score of outputs against inputs."""
    manager_ = manager.ModelManager(inputs=1, outputs=1, layers=1, layer_size=1)
    manager_.encoded_outputs = outputs
    manager_.get_rated_by(judge=MockJudge(), encoded_inputs=inputs)
    assert manager_.rating == expected_rating
| 29.47541 | 80 | 0.679644 |
import random
from typing import List
import pytest
from bach_generator.src import manager, model
class MockModel:
    """Test double standing in for a trainable model."""
    def __init__(self, inputs: int = 2):
        self.inputs = inputs
        self.jumbled_with = None
    def jumble(self, jumble_strategy, weight_divergence):
        # record the arguments of the most recent jumble call
        self.jumbled_with = (jumble_strategy, weight_divergence)
    @staticmethod
    def compute(inputs: List[int]) -> float:
        # one-element list holding the arithmetic mean of the inputs
        return [sum(inputs) / len(inputs)]
class MockDecoder:
    """Decoder double truncating each float toward zero."""
    @staticmethod
    def decode(inputs: List[float]) -> List[int]:
        return list(map(int, inputs))
class MockJudge:
    """Judge double: sum(inputs) / sum(outputs), guarding division by zero."""
    @staticmethod
    def rate(inputs: List[int], outputs: List[int]) -> float:
        return sum(inputs) / (sum(outputs) if sum(outputs) else 1)
class MockQuantizer:
    """Quantizer double truncating each float toward zero."""
    @staticmethod
    def quantize(inputs: List[float]) -> List[int]:
        return list(map(int, inputs))
@pytest.mark.parametrize(
    "inputs, outputs, layers, layer_size",
    [
        [0, 0, 0, 0],
        [5, 1, 1, 20],
        random.choices(list(range(20)), k=4),
    ],
)
def test_model_manager_init(inputs, outputs, layers, layer_size):
    """ModelManager builds a model with input, hidden and output layers."""
    manager_ = manager.ModelManager(inputs, outputs, layers, layer_size)
    assert manager_.model is not None
    assert len(manager_.model._layers) == layers + 2  # +2: input & output layers
    assert len(manager_.model._layers[0].nodes) == inputs
    assert len(manager_.model._layers[-1].nodes) == outputs
    for layer in manager_.model._layers[1:-1]:
        assert len(layer.nodes) == layer_size
@pytest.mark.usefixtures("test_model")
def test_model_manager_construct_from_model(test_model):
    """construct_with_model wraps an existing model without copying it."""
    manager_ = manager.ModelManager.construct_with_model(model=test_model)
    assert manager_.model is test_model
@pytest.mark.parametrize(
    "inputs, expected_encoded_outputs",
    [
        ([], []),
        ([0], [0]),
        ([0, 1, 5, 3], [0, 0, 3, 4]),
    ],
)
def test_model_manager_run(inputs, expected_encoded_outputs):
    """run_model stores one quantized model output per input value."""
    manager_ = manager.ModelManager.construct_with_model(model=MockModel())
    manager_.run_model(inputs=inputs, quantizer=MockQuantizer())
    assert manager_.encoded_outputs == expected_encoded_outputs
@pytest.mark.parametrize(
    "jumble_strategy, weight_divergence",
    [
        (model.jumble_by_factor_strategy, 0),
        (model.jumble_by_selection_strategy, -2),
    ],
)
def test_model_manager_clone(jumble_strategy, weight_divergence):
    """Smoke test: clone accepts each jumble strategy without raising."""
    manager_ = manager.ModelManager.construct_with_model(model=MockModel())
    manager_.clone(jumble_strategy, weight_divergence)
@pytest.mark.parametrize(
    "encoded_outputs, expected_decoded_outputs",
    [
        ([], []),
        ([0.1], [0]),
        ([0.7, 1.1, 4.6, -1.2], [0, 1, 4, -1]),
    ],
)
def test_model_manager_decode(encoded_outputs, expected_decoded_outputs):
    """decode_outputs runs the stored encoded outputs through the decoder."""
    manager_ = manager.ModelManager(inputs=1, outputs=1, layers=1, layer_size=1)
    manager_.encoded_outputs = encoded_outputs
    manager_.decode_outputs(decoder=MockDecoder())
    assert manager_.decoded_outputs == expected_decoded_outputs
@pytest.mark.parametrize(
    "inputs, outputs, expected_rating",
    [
        ([], [], 0),
        ([1], [2], 0.5),
        ([1, 2, 3], [0, 5, -2], 2),
    ],
)
def test_model_manager_rating(inputs, outputs, expected_rating):
    """get_rated_by stores the judge's score of outputs against inputs."""
    manager_ = manager.ModelManager(inputs=1, outputs=1, layers=1, layer_size=1)
    manager_.encoded_outputs = outputs
    manager_.get_rated_by(judge=MockJudge(), encoded_inputs=inputs)
    assert manager_.rating == expected_rating
| true | true |
f7fc43af608992a57d11353d6656045b8db8185f | 32 | py | Python | src/test/resources/python-code-examples/import_from_var/use_imported.py | florayym/depends | 6c437a78268d91d54059b560c0273ae3c9253452 | [
"BSD-3-Clause",
"MIT"
] | 146 | 2019-03-09T03:02:59.000Z | 2022-03-28T11:28:41.000Z | src/test/resources/python-code-examples/import_from_var/use_imported.py | florayym/depends | 6c437a78268d91d54059b560c0273ae3c9253452 | [
"BSD-3-Clause",
"MIT"
] | 27 | 2019-03-11T02:12:54.000Z | 2021-12-21T00:24:13.000Z | src/test/resources/python-code-examples/import_from_var/use_imported.py | florayym/depends | 6c437a78268d91d54059b560c0273ae3c9253452 | [
"BSD-3-Clause",
"MIT"
] | 41 | 2019-03-09T03:04:50.000Z | 2022-01-14T06:53:14.000Z | from pkg.core import c
c.foo()  # smoke-call through the object imported from pkg.core
| 8 | 22 | 0.6875 | from pkg.core import c
c.foo()
| true | true |
f7fc459a39797bfd71ba44df3a7aec515af29d51 | 236 | py | Python | withrestc3/testapp/models.py | Ajitkumar1995/Django_REST_API | e7e3b0912602f4478415337fda4a851171e967ee | [
"MIT"
] | null | null | null | withrestc3/testapp/models.py | Ajitkumar1995/Django_REST_API | e7e3b0912602f4478415337fda4a851171e967ee | [
"MIT"
] | null | null | null | withrestc3/testapp/models.py | Ajitkumar1995/Django_REST_API | e7e3b0912602f4478415337fda4a851171e967ee | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Employee(models.Model):
    """Employee record: number, name, salary and address."""
    eno=models.IntegerField()    # employee number
    ename=models.CharField(max_length=64)   # employee name
    esal=models.FloatField()     # salary
    eaddr=models.CharField(max_length=64)   # address
| 29.5 | 42 | 0.733051 | from django.db import models
class Employee(models.Model):
    """Employee record: number, name, salary and address."""
    eno=models.IntegerField()    # employee number
    ename=models.CharField(max_length=64)   # employee name
    esal=models.FloatField()     # salary
    eaddr=models.CharField(max_length=64)   # address
| true | true |
f7fc45cb1e6ef9e9a6179cd21f990518afe34905 | 26,032 | py | Python | libs/blocks/tests/bricks/test_recurrent.py | dendisuhubdy/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 295 | 2015-09-25T21:15:04.000Z | 2022-01-13T01:16:18.000Z | libs/blocks/tests/bricks/test_recurrent.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 21 | 2015-10-28T19:06:32.000Z | 2022-03-11T23:13:05.000Z | libs/blocks/tests/bricks/test_recurrent.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 114 | 2015-09-26T21:23:02.000Z | 2021-11-19T02:36:41.000Z | import itertools
import unittest
from collections import OrderedDict
import numpy
import theano
from numpy.testing import assert_allclose, assert_raises
from theano import tensor
from theano.gof.graph import is_same_graph
from blocks.utils import is_shared_variable
from blocks.bricks.base import application
from blocks.bricks import Tanh
from blocks.bricks.recurrent import (
recurrent, BaseRecurrent, GatedRecurrent,
SimpleRecurrent, Bidirectional, LSTM,
RecurrentStack, RECURRENTSTACK_SEPARATOR)
from blocks.initialization import (
Constant, IsotropicGaussian, Orthogonal, Identity)
from blocks.filter import get_application_call, VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import INITIAL_STATE
class RecurrentWrapperTestClass(BaseRecurrent):
    """Toy recurrent brick with two states and two outputs.

    Used to exercise the @recurrent decorator's handling of multiple
    states/outputs and of masked sequences.
    """
    def __init__(self, dim, **kwargs):
        # Bug fix: the superclass __init__ must not receive `self` as an
        # extra positional argument (it would be interpreted as the brick
        # name by Brick.__init__).
        super(RecurrentWrapperTestClass, self).__init__(**kwargs)
        self.dim = dim

    def get_dim(self, name):
        """All states/outputs share `dim`; the mask is scalar per step."""
        if name in ['inputs', 'states', 'outputs', 'states_2', 'outputs_2']:
            return self.dim
        if name == 'mask':
            return 0
        return super(RecurrentWrapperTestClass, self).get_dim(name)

    @recurrent(sequences=['inputs', 'mask'], states=['states', 'states_2'],
               outputs=['outputs', 'states_2', 'outputs_2', 'states'],
               contexts=[])
    def apply(self, inputs=None, states=None, states_2=None, mask=None):
        """One step: `states` accumulates inputs, `states_2` grows by 0.5;
        each output is ten times its corresponding next state."""
        next_states = states + inputs
        next_states_2 = states_2 + .5
        if mask:
            # masked-out batch entries keep their previous state
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * states)
        outputs = 10 * next_states
        outputs_2 = 10 * next_states_2
        return outputs, next_states_2, outputs_2, next_states
class TestRecurrentWrapper(unittest.TestCase):
    """Checks the @recurrent wrapper on a brick with two states/outputs."""
    def setUp(self):
        self.recurrent_example = RecurrentWrapperTestClass(dim=1)
    def test(self):
        X = tensor.tensor3('X')
        out, H2, out_2, H = self.recurrent_example.apply(
            inputs=X, mask=None)
        # five steps of ones, batch of one, dim one
        x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX)
        h = H.eval({X: x_val})
        h2 = H2.eval({X: x_val})
        out_eval = out.eval({X: x_val})
        out_2_eval = out_2.eval({X: x_val})
        # This also implicitly tests that the initial states are zeros:
        # states accumulate the inputs, states_2 increases by .5 per step,
        # and each output is ten times its state.
        assert_allclose(h, x_val.cumsum(axis=0))
        assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1))
        assert_allclose(h * 10, out_eval)
        assert_allclose(h2 * 10, out_2_eval)
class RecurrentBrickWithBugInInitialStates(BaseRecurrent):
    """Deliberately broken brick used by test_bug_in_initial_states.

    In apply2 the declared state name ('states2') does not match the
    argument name ('states'); looking up the initial state must fail.
    Do NOT "fix" this mismatch -- it is the point of the test.
    """
    @recurrent(sequences=[], contexts=[],
               states=['states'], outputs=['states'])
    def apply(self, states):
        return states
    @recurrent(sequences=[], contexts=[],
               states=['states2'], outputs=['states2'])
    def apply2(self, states):
        # intentional: 'states2' declared above, argument named 'states'
        return states
    def get_dim(self, name):
        return 100
def test_bug_in_initial_states():
    """The state-name mismatch in apply2 must surface as a KeyError."""
    def trigger():
        RecurrentBrickWithBugInInitialStates().apply2(n_steps=3,
                                                      batch_size=5)
    assert_raises(KeyError, trigger)
class TestSimpleRecurrent(unittest.TestCase):
    """Checks SimpleRecurrent against a hand-computed tanh recurrence
    with all recurrent weights fixed to 2."""
    def setUp(self):
        self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2),
                                      activation=Tanh())
        self.simple.initialize()
    def test_one_step(self):
        """A single masked step matches tanh(h0.W + x) with W = 2*ones."""
        h0 = tensor.matrix('h0')
        x = tensor.matrix('x')
        mask = tensor.vector('mask')
        h1 = self.simple.apply(x, h0, mask=mask, iterate=False)
        next_h = theano.function(inputs=[h0, x, mask], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
                                  dtype=theano.config.floatX)
        mask_val = numpy.array([1, 0]).astype(theano.config.floatX)
        h1_val = numpy.tanh(h0_val.dot(2 * numpy.ones((3, 3))) + x_val)
        # masked-out batch entries keep their previous state
        h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val
        assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0])
    def test_many_steps(self):
        """Iterated application matches a step-by-step numpy recurrence."""
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        h = self.simple.apply(x, mask=mask, iterate=True)
        calc_h = theano.function(inputs=[x, mask], outputs=[h])
        # 24 steps x batch of 4 x dim 3; last batch entry masked after step 12
        x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                    dtype=theano.config.floatX)
        x_val = numpy.ones((24, 4, 3),
                           dtype=theano.config.floatX) * x_val[..., None]
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # h_val[0] is the (zero) initial state
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        for i in range(1, 25):
            h_val[i] = numpy.tanh(h_val[i - 1].dot(
                2 * numpy.ones((3, 3))) + x_val[i - 1])
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
        h_val = h_val[1:]
        assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
        # Also test that initial state is a parameter
        initial_state, = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial_state)
        assert initial_state.name == 'initial_state'
class TestLSTM(unittest.TestCase):
    """Checks LSTM against a hand-computed gate-by-gate numpy recurrence
    (weights fixed to 2, biases to 0; gates packed as in/forget/cell/out)."""
    def setUp(self):
        self.lstm = LSTM(dim=3, weights_init=Constant(2),
                         biases_init=Constant(0))
        self.lstm.initialize()
    def test_one_step(self):
        """A single step matches the manual LSTM gate computation."""
        h0 = tensor.matrix('h0')
        c0 = tensor.matrix('c0')
        x = tensor.matrix('x')
        h1, c1 = self.lstm.apply(x, h0, c0, iterate=False)
        next_h = theano.function(inputs=[x, h0, c0], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([range(12), range(12, 24)],
                                  dtype=theano.config.floatX)
        # state-to-gates matrix plus the three peephole weight vectors
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        # omitting biases because they are zero
        activation = numpy.dot(h0_val, W_state_val) + x_val
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        # columns 0:3 input gate, 3:6 forget gate, 6:9 cell input, 9:12 output gate
        i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in)
        f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget)
        next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9])
        o_t = sigmoid(activation[:, 9:12] +
                      next_cells * W_cell_to_out)
        h1_val = o_t * numpy.tanh(next_cells)
        assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0],
                        rtol=1e-6)
    def test_many_steps(self):
        """Iterated masked application matches the manual recurrence."""
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        h, c = self.lstm.apply(x, mask=mask, iterate=True)
        calc_h = theano.function(inputs=[x, mask], outputs=[h])
        x_val = (0.1 * numpy.asarray(
            list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
            dtype=theano.config.floatX))
        x_val = numpy.ones((24, 4, 12),
                           dtype=theano.config.floatX) * x_val[:, None, :]
        # last batch entry masked out from step 12 on
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # index 0 holds the zero initial state/cells
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        c_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        for i in range(1, 25):
            activation = numpy.dot(h_val[i-1], W_state_val) + x_val[i-1]
            i_t = sigmoid(activation[:, :3] + c_val[i-1] * W_cell_to_in)
            f_t = sigmoid(activation[:, 3:6] + c_val[i-1] * W_cell_to_forget)
            c_val[i] = f_t * c_val[i-1] + i_t * numpy.tanh(activation[:, 6:9])
            o_t = sigmoid(activation[:, 9:12] +
                          c_val[i] * W_cell_to_out)
            h_val[i] = o_t * numpy.tanh(c_val[i])
            # masked-out entries keep previous state and cells
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
            c_val[i] = (mask_val[i - 1, :, None] * c_val[i] +
                        (1 - mask_val[i - 1, :, None]) * c_val[i - 1])
        h_val = h_val[1:]
        assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
        # Also test that initial state and cells are parameters
        initial1, initial2 = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial1)
        assert is_shared_variable(initial2)
        assert {initial1.name, initial2.name} == {
            'initial_state', 'initial_cells'}
class TestRecurrentStack(unittest.TestCase):
    """Checks a 4-layer LSTM RecurrentStack (with and without skip
    connections / low-memory mode) against a manual numpy recurrence."""
    def setUp(self):
        depth = 4
        self.depth = depth
        dim = 3  # don't change, hardwired in the code
        transitions = [LSTM(dim=dim) for _ in range(depth)]
        # stack0: plain stack; stack2: same transitions with skip connections
        self.stack0 = RecurrentStack(transitions,
                                     weights_init=Constant(2),
                                     biases_init=Constant(0))
        self.stack0.initialize()
        self.stack2 = RecurrentStack(transitions,
                                     weights_init=Constant(2),
                                     biases_init=Constant(0),
                                     skip_connections=True)
        self.stack2.initialize()
    def do_one_step(self, stack, skip_connections=False, low_memory=False):
        """Compare one step of the stack with a hand-computed LSTM cascade."""
        depth = self.depth
        # batch=2
        h0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
                                   dtype=theano.config.floatX)
        c0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([range(12), range(12, 24)],
                                  dtype=theano.config.floatX)
        # we will use same weights on all layers
        W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        # build keyword arguments, one states/cells pair per layer;
        # extra inputs only for layer 0 (or all layers with skip connections)
        kwargs = OrderedDict()
        for d in range(depth):
            if d > 0:
                suffix = RECURRENTSTACK_SEPARATOR + str(d)
            else:
                suffix = ''
            if d == 0 or skip_connections:
                kwargs['inputs' + suffix] = tensor.matrix('inputs' + suffix)
                kwargs['inputs' + suffix].tag.test_value = x_val
            kwargs['states' + suffix] = tensor.matrix('states' + suffix)
            kwargs['states' + suffix].tag.test_value = h0_val[d]
            kwargs['cells' + suffix] = tensor.matrix('cells' + suffix)
            kwargs['cells' + suffix].tag.test_value = c0_val[d]
        results = stack.apply(iterate=False, low_memory=low_memory, **kwargs)
        next_h = theano.function(inputs=list(kwargs.values()),
                                 outputs=results)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        h1_val = []
        x_v = x_val
        args_val = []
        for d in range(depth):
            if d == 0 or skip_connections:
                args_val.append(x_val)
            h0_v = h0_val[d]
            args_val.append(h0_v)
            c0_v = c0_val[d]
            args_val.append(c0_v)
            # omitting biases because they are zero
            activation = numpy.dot(h0_v, W_state_val) + x_v
            if skip_connections and d > 0:
                activation += x_val
            i_t = sigmoid(activation[:, :3] + c0_v * W_cell_to_in)
            f_t = sigmoid(activation[:, 3:6] + c0_v * W_cell_to_forget)
            next_cells = f_t * c0_v + i_t * numpy.tanh(activation[:, 6:9])
            o_t = sigmoid(activation[:, 9:12] +
                          next_cells * W_cell_to_out)
            h1_v = o_t * numpy.tanh(next_cells)
            # current layer output state transformed to input of next
            x_v = numpy.dot(h1_v, W_state2x_val)
            h1_val.append(h1_v)
        res = next_h(*args_val)
        # results alternate states/cells per layer: compare states only
        for d in range(depth):
            assert_allclose(h1_val[d], res[d * 2], rtol=1e-6)
    def test_one_step(self):
        self.do_one_step(self.stack0)
        self.do_one_step(self.stack0, low_memory=True)
        self.do_one_step(self.stack2, skip_connections=True)
        self.do_one_step(self.stack2, skip_connections=True, low_memory=True)
    def do_many_steps(self, stack, skip_connections=False, low_memory=False):
        """Compare iterated masked application with a manual recurrence."""
        depth = self.depth
        # 24 steps
        #  4 batch examples
        # 12 dimensions per step
        x_val = (0.1 * numpy.asarray(
            list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
            dtype=theano.config.floatX))
        x_val = numpy.ones((24, 4, 12),
                           dtype=theano.config.floatX) * x_val[:, None, :]
        # mask the last third of steps
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # unroll all states and cells for all steps and also initial value
        h_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
        c_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
        # we will use same weights on all layers
        W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        kwargs = OrderedDict()
        for d in range(depth):
            if d > 0:
                suffix = RECURRENTSTACK_SEPARATOR + str(d)
            else:
                suffix = ''
            if d == 0 or skip_connections:
                kwargs['inputs' + suffix] = tensor.tensor3('inputs' + suffix)
                kwargs['inputs' + suffix].tag.test_value = x_val
        kwargs['mask'] = tensor.matrix('mask')
        kwargs['mask'].tag.test_value = mask_val
        results = stack.apply(iterate=True, low_memory=low_memory, **kwargs)
        calc_h = theano.function(inputs=list(kwargs.values()),
                                 outputs=results)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        for i in range(1, 25):
            x_v = x_val[i - 1]
            h_vs = []
            c_vs = []
            for d in range(depth):
                h_v = h_val[d][i - 1, :, :]
                c_v = c_val[d][i - 1, :, :]
                activation = numpy.dot(h_v, W_state_val) + x_v
                if skip_connections and d > 0:
                    activation += x_val[i - 1]
                i_t = sigmoid(activation[:, :3] + c_v * W_cell_to_in)
                f_t = sigmoid(activation[:, 3:6] + c_v * W_cell_to_forget)
                c_v1 = f_t * c_v + i_t * numpy.tanh(activation[:, 6:9])
                o_t = sigmoid(activation[:, 9:12] +
                              c_v1 * W_cell_to_out)
                h_v1 = o_t * numpy.tanh(c_v1)
                # masked-out entries keep previous state and cells
                h_v = (mask_val[i - 1, :, None] * h_v1 +
                       (1 - mask_val[i - 1, :, None]) * h_v)
                c_v = (mask_val[i - 1, :, None] * c_v1 +
                       (1 - mask_val[i - 1, :, None]) * c_v)
                # current layer output state transformed to input of next
                x_v = numpy.dot(h_v, W_state2x_val)
                h_vs.append(h_v)
                c_vs.append(c_v)
            for d in range(depth):
                h_val[d][i, :, :] = h_vs[d]
                c_val[d][i, :, :] = c_vs[d]
        args_val = [x_val]*(depth if skip_connections else 1) + [mask_val]
        res = calc_h(*args_val)
        for d in range(depth):
            assert_allclose(h_val[d][1:], res[d * 2], rtol=1e-4)
            assert_allclose(c_val[d][1:], res[d * 2 + 1], rtol=1e-4)
        # Also test that initial state is a parameter
        for h in results:
            initial_states = VariableFilter(roles=[INITIAL_STATE])(
                ComputationGraph(h))
            assert all(is_shared_variable(initial_state)
                       for initial_state in initial_states)
    def test_many_steps(self):
        self.do_many_steps(self.stack0)
        self.do_many_steps(self.stack0, low_memory=True)
        self.do_many_steps(self.stack2, skip_connections=True)
        self.do_many_steps(self.stack2, skip_connections=True, low_memory=True)
class TestGatedRecurrent(unittest.TestCase):
    """Checks GatedRecurrent (GRU-style, tanh gates here) against a
    hand-computed numpy recurrence."""
    def setUp(self):
        # constant weights for the single-step check
        self.gated = GatedRecurrent(
            dim=3, activation=Tanh(),
            gate_activation=Tanh(), weights_init=Constant(2))
        self.gated.initialize()
        # random (seeded) weights for the multi-step check
        self.reset_only = GatedRecurrent(
            dim=3, activation=Tanh(),
            gate_activation=Tanh(),
            weights_init=IsotropicGaussian(), seed=1)
        self.reset_only.initialize()
    def test_one_step(self):
        """One step matches the manual update/reset-gate computation."""
        h0 = tensor.matrix('h0')
        x = tensor.matrix('x')
        gi = tensor.matrix('gi')
        h1 = self.gated.apply(x, gi, h0, iterate=False)
        next_h = theano.function(inputs=[h0, x, gi], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
                                  dtype=theano.config.floatX)
        # gate inputs: update-gate half then reset-gate half
        zi_val = (h0_val + x_val) / 2
        ri_val = -x_val
        W_val = 2 * numpy.ones((3, 3), dtype=theano.config.floatX)
        # update gate z, reset gate r (both tanh here), then candidate state
        z_val = numpy.tanh(h0_val.dot(W_val) + zi_val)
        r_val = numpy.tanh(h0_val.dot(W_val) + ri_val)
        h1_val = (z_val * numpy.tanh((r_val * h0_val).dot(W_val) + x_val) +
                  (1 - z_val) * h0_val)
        assert_allclose(
            h1_val, next_h(h0_val, x_val, numpy.hstack([zi_val, ri_val]))[0],
            rtol=1e-6)
    def test_many_steps(self):
        """Iterated masked application matches the manual recurrence."""
        x = tensor.tensor3('x')
        gi = tensor.tensor3('gi')
        mask = tensor.matrix('mask')
        h = self.reset_only.apply(x, gi, mask=mask)
        calc_h = theano.function(inputs=[x, gi, mask], outputs=[h])
        x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                    dtype=theano.config.floatX)
        x_val = numpy.ones((24, 4, 3),
                           dtype=theano.config.floatX) * x_val[..., None]
        ri_val = 0.3 - x_val
        zi_val = 2 * ri_val
        # last batch entry masked out from step 12 on
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        # read back the actual (randomly initialized) weights
        W = self.reset_only.state_to_state.get_value()
        Wz = self.reset_only.state_to_gates.get_value()[:, :3]
        Wr = self.reset_only.state_to_gates.get_value()[:, 3:]
        for i in range(1, 25):
            z_val = numpy.tanh(h_val[i - 1].dot(Wz) + zi_val[i - 1])
            r_val = numpy.tanh(h_val[i - 1].dot(Wr) + ri_val[i - 1])
            h_val[i] = numpy.tanh((r_val * h_val[i - 1]).dot(W) +
                                  x_val[i - 1])
            h_val[i] = z_val * h_val[i] + (1 - z_val) * h_val[i - 1]
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
        h_val = h_val[1:]
        # TODO Figure out why this tolerance needs to be so big
        assert_allclose(
            h_val,
            calc_h(x_val, numpy.concatenate(
                [zi_val, ri_val], axis=2), mask_val)[0],
            1e-04)
        # Also test that initial state is a parameter
        initial_state, = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial_state)
        assert initial_state.name == 'initial_state'
class TestBidirectional(unittest.TestCase):
    """A Bidirectional wrapper must equal two SimpleRecurrent passes.

    The forward half of the output is compared with a plain pass and the
    backward half with a pass over the time-reversed input.
    """
    def setUp(self):
        self.bidir = Bidirectional(weights_init=Orthogonal(),
                                   prototype=SimpleRecurrent(
                                       dim=3, activation=Tanh()))
        self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                      activation=Tanh(), seed=1)
        self.bidir.allocate()
        self.simple.initialize()
        # Copy the stand-alone network's weights into both directions of
        # the wrapper so the outputs are directly comparable.
        self.bidir.children[0].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.bidir.children[1].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=theano.config.floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                      self.x_val[..., None])
        # Mask out the second half of the steps for the last batch entry.
        self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        self.mask_val[12:24, 3] = 0
    def test(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        calc_bidir = theano.function([x, mask],
                                     [self.bidir.apply(x, mask=mask)])
        calc_simple = theano.function([x, mask],
                                      [self.simple.apply(x, mask=mask)])
        h_bidir = calc_bidir(self.x_val, self.mask_val)[0]
        h_simple = calc_simple(self.x_val, self.mask_val)[0]
        h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]
        output_names = self.bidir.apply.outputs
        assert output_names == ['states']
        # First 3 features: forward pass; last 3: reversed pass.
        assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)
        assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)
class TestBidirectionalStack(unittest.TestCase):
    """A stack of Bidirectional layers must match applying them one by one.

    Each layer of the RecurrentStack is compared against a stand-alone
    application of the same Bidirectional brick, feeding the forward half
    of each layer's output into the next.
    """
    def setUp(self):
        prototype = SimpleRecurrent(dim=3, activation=Tanh())
        self.layers = [
            Bidirectional(weights_init=Orthogonal(), prototype=prototype)
            for _ in range(3)]
        self.stack = RecurrentStack(self.layers)
        # Identity forks with zero biases make the inter-layer transforms
        # transparent, so layer outputs can be compared directly.
        for fork in self.stack.forks:
            fork.weights_init = Identity(1)
            fork.biases_init = Constant(0)
        self.stack.initialize()
        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=theano.config.floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                      self.x_val[..., None])
        self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        self.mask_val[12:24, 3] = 0
    def test_steps(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        calc_stack_layers = [
            theano.function([x, mask], self.stack.apply(x, mask=mask)[i])
            for i in range(len(self.layers))]
        stack_layers = [
            f(self.x_val, self.mask_val) for f in calc_stack_layers]
        h_val = self.x_val
        for stack_layer_value, bidir_net in zip(stack_layers, self.layers):
            calc = theano.function([x, mask], bidir_net.apply(x, mask=mask))
            simple_layer_value = calc(h_val, self.mask_val)
            assert_allclose(stack_layer_value, simple_layer_value, rtol=1e-04)
            # Only the forward half feeds the next layer.
            h_val = simple_layer_value[..., :3]
    def test_dims(self):
        self.assertEqual(self.stack.get_dim("inputs"), 3)
        for i in range(len(self.layers)):
            state_name = self.stack.suffix("states", i)
            # Bidirectional layers concatenate both directions: 2 * dim.
            self.assertEqual(self.stack.get_dim(state_name), 6)
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    inp = tensor.tensor3()
    brick = SimpleRecurrent(dim=3, activation=Tanh())
    out = brick.apply(inp)
    call = get_application_call(out)
    assert call.inner_inputs
    assert call.inner_outputs
    inner_cg = ComputationGraph(call.inner_outputs)
    # The inner scan graph must be annotated with `brick.apply`.
    annotated = VariableFilter(applications=[brick.apply])(inner_cg)
    assert len(annotated) == 3
    # The saved inner graph must be equivalent to a stand-alone,
    # non-iterating application of `brick.apply`.
    standalone = brick.apply(*call.inner_inputs, iterate=False)
    assert is_same_graph(call.inner_outputs[0], standalone)
def test_super_in_recurrent_overrider():
    # A regression test for the issue #475
    class SimpleRecurrentWithContext(SimpleRecurrent):
        @application(contexts=['context'])
        def apply(self, context, *args, **kwargs):
            # Fold the context into the inputs, then delegate to the
            # parent implementation through super().
            kwargs['inputs'] += context
            return super(SimpleRecurrentWithContext, self).apply(*args,
                                                                 **kwargs)
        @apply.delegate
        def apply_delegate(self):
            # Signature and annotations are delegated to the parent apply.
            return super(SimpleRecurrentWithContext, self).apply
    brick = SimpleRecurrentWithContext(100, Tanh())
    inputs = tensor.tensor3('inputs')
    context = tensor.matrix('context').dimshuffle('x', 0, 1)
    # It suffices that this call does not raise.
    brick.apply(context, inputs=inputs)
| 42.054927 | 79 | 0.566111 | import itertools
import unittest
from collections import OrderedDict
import numpy
import theano
from numpy.testing import assert_allclose, assert_raises
from theano import tensor
from theano.gof.graph import is_same_graph
from blocks.utils import is_shared_variable
from blocks.bricks.base import application
from blocks.bricks import Tanh
from blocks.bricks.recurrent import (
recurrent, BaseRecurrent, GatedRecurrent,
SimpleRecurrent, Bidirectional, LSTM,
RecurrentStack, RECURRENTSTACK_SEPARATOR)
from blocks.initialization import (
Constant, IsotropicGaussian, Orthogonal, Identity)
from blocks.filter import get_application_call, VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import INITIAL_STATE
class RecurrentWrapperTestClass(BaseRecurrent):
    """Minimal recurrent brick used to exercise the ``@recurrent`` wrapper.

    It carries two states ('states', 'states_2') and returns them together
    with scaled copies ('outputs', 'outputs_2') in a shuffled order, so the
    tests can verify that the wrapper routes values by name, not position.
    """
    def __init__(self, dim, ** kwargs):
        # BUG FIX: the original passed `self` as an extra positional
        # argument here, which the Brick base __init__ would receive as
        # its `name` parameter.
        super(RecurrentWrapperTestClass, self).__init__(** kwargs)
        self.dim = dim
    def get_dim(self, name):
        """Return ``self.dim`` for every role except the 0-dim mask."""
        if name in ['inputs', 'states', 'outputs', 'states_2', 'outputs_2']:
            return self.dim
        if name == 'mask':
            return 0
        return super(RecurrentWrapperTestClass, self).get_dim(name)
    @recurrent(sequences=['inputs', 'mask'], states=['states', 'states_2'],
               outputs=['outputs', 'states_2', 'outputs_2', 'states'],
               contexts=[])
    def apply(self, inputs=None, states=None, states_2=None, mask=None):
        # 'states' accumulates the inputs; 'states_2' grows by 0.5 a step.
        next_states = states + inputs
        next_states_2 = states_2 + .5
        # NOTE(review): `if mask:` only distinguishes a missing mask
        # (None) from a provided one; the tests always pass mask=None
        # here, and a symbolic mask would not be truth-testable.
        if mask:
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * states)
        outputs = 10 * next_states
        outputs_2 = 10 * next_states_2
        return outputs, next_states_2, outputs_2, next_states
class TestRecurrentWrapper(unittest.TestCase):
    """Check that the ``@recurrent`` wrapper routes states and outputs by name."""
    def setUp(self):
        self.recurrent_example = RecurrentWrapperTestClass(dim=1)
    def test(self):
        X = tensor.tensor3('X')
        # The wrapper returns (outputs, states_2, outputs_2, states) in
        # the order declared by the @recurrent decorator.
        out, H2, out_2, H = self.recurrent_example.apply(
            inputs=X, mask=None)
        x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX)
        h = H.eval({X: x_val})
        h2 = H2.eval({X: x_val})
        out_eval = out.eval({X: x_val})
        out_2_eval = out_2.eval({X: x_val})
        # 'states' accumulates the inputs, 'states_2' grows by 0.5 per
        # step, and the outputs are ten times the respective states.
        assert_allclose(h, x_val.cumsum(axis=0))
        assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1))
        assert_allclose(h * 10, out_eval)
        assert_allclose(h2 * 10, out_2_eval)
class RecurrentBrickWithBugInInitialStates(BaseRecurrent):
    """Brick whose ``apply2`` declares a state name its signature lacks.

    ``apply2`` advertises the state 'states2' while the method argument is
    still called ``states``; resolving its initial state is therefore
    expected to fail (exercised by ``test_bug_in_initial_states``).
    """
    @recurrent(sequences=[], contexts=[],
               states=['states'], outputs=['states'])
    def apply(self, states):
        return states
    @recurrent(sequences=[], contexts=[],
               states=['states2'], outputs=['states2'])
    def apply2(self, states):
        return states
    def get_dim(self, name):
        # Every role is given the same fixed dimensionality.
        return 100
def test_bug_in_initial_states():
    """A state declared under a name the signature lacks must raise KeyError."""
    def trigger_initial_state_lookup():
        RecurrentBrickWithBugInInitialStates().apply2(n_steps=3,
                                                      batch_size=5)
    assert_raises(KeyError, trigger_initial_state_lookup)
class TestSimpleRecurrent(unittest.TestCase):
    """Compare SimpleRecurrent against a numpy reference implementation.

    Weights are Constant(2), so the reference transition is
    ``tanh(h . (2 * ones(3, 3)) + x)`` with mask-driven carry-over.
    """
    def setUp(self):
        self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2),
                                      activation=Tanh())
        self.simple.initialize()
    def test_one_step(self):
        """A single masked step must equal the closed-form numpy update."""
        h0 = tensor.matrix('h0')
        x = tensor.matrix('x')
        mask = tensor.vector('mask')
        h1 = self.simple.apply(x, h0, mask=mask, iterate=False)
        next_h = theano.function(inputs=[h0, x, mask], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
                                  dtype=theano.config.floatX)
        mask_val = numpy.array([1, 0]).astype(theano.config.floatX)
        h1_val = numpy.tanh(h0_val.dot(2 * numpy.ones((3, 3))) + x_val)
        # The masked-out batch entry keeps its previous state.
        h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val
        assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0])
    def test_many_steps(self):
        """Iterating 24 masked steps must match the unrolled numpy loop."""
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        h = self.simple.apply(x, mask=mask, iterate=True)
        calc_h = theano.function(inputs=[x, mask], outputs=[h])
        x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                    dtype=theano.config.floatX)
        x_val = numpy.ones((24, 4, 3),
                           dtype=theano.config.floatX) * x_val[..., None]
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # h_val[0] is the (zero) initial state; steps fill rows 1..24.
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        for i in range(1, 25):
            h_val[i] = numpy.tanh(h_val[i - 1].dot(
                2 * numpy.ones((3, 3))) + x_val[i - 1])
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
        h_val = h_val[1:]
        assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
        # Also check that the initial state is a (shared) parameter.
        initial_state, = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial_state)
        assert initial_state.name == 'initial_state'
class TestLSTM(unittest.TestCase):
    """Compare the LSTM brick against a numpy reference implementation.

    With dim=3 the fused input transform has 4 * 3 = 12 columns, sliced
    as [:3] input gate, [3:6] forget gate, [6:9] cell candidate and
    [9:12] output gate; the peephole weights are the W_cell_to_* vectors.
    """
    def setUp(self):
        self.lstm = LSTM(dim=3, weights_init=Constant(2),
                         biases_init=Constant(0))
        self.lstm.initialize()
    def test_one_step(self):
        """A single LSTM step must match the closed-form numpy update."""
        h0 = tensor.matrix('h0')
        c0 = tensor.matrix('c0')
        x = tensor.matrix('x')
        h1, c1 = self.lstm.apply(x, h0, c0, iterate=False)
        next_h = theano.function(inputs=[x, h0, c0], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([range(12), range(12, 24)],
                                  dtype=theano.config.floatX)
        # All weights were initialized to Constant(2); biases are zero.
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        activation = numpy.dot(h0_val, W_state_val) + x_val
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in)
        f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget)
        next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9])
        # Output gate peeks at the *new* cell state.
        o_t = sigmoid(activation[:, 9:12] +
                      next_cells * W_cell_to_out)
        h1_val = o_t * numpy.tanh(next_cells)
        assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0],
                        rtol=1e-6)
    def test_many_steps(self):
        """Iterating 24 masked steps must match the unrolled numpy loop."""
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        h, c = self.lstm.apply(x, mask=mask, iterate=True)
        calc_h = theano.function(inputs=[x, mask], outputs=[h])
        x_val = (0.1 * numpy.asarray(
            list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
            dtype=theano.config.floatX))
        x_val = numpy.ones((24, 4, 12),
                           dtype=theano.config.floatX) * x_val[:, None, :]
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # Row 0 holds the (zero) initial state/cells; steps fill 1..24.
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        c_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        for i in range(1, 25):
            activation = numpy.dot(h_val[i-1], W_state_val) + x_val[i-1]
            i_t = sigmoid(activation[:, :3] + c_val[i-1] * W_cell_to_in)
            f_t = sigmoid(activation[:, 3:6] + c_val[i-1] * W_cell_to_forget)
            c_val[i] = f_t * c_val[i-1] + i_t * numpy.tanh(activation[:, 6:9])
            o_t = sigmoid(activation[:, 9:12] +
                          c_val[i] * W_cell_to_out)
            h_val[i] = o_t * numpy.tanh(c_val[i])
            # Masked batch entries carry over both state and cells.
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
            c_val[i] = (mask_val[i - 1, :, None] * c_val[i] +
                        (1 - mask_val[i - 1, :, None]) * c_val[i - 1])
        h_val = h_val[1:]
        assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)
        # Both the initial state and initial cells must be parameters.
        initial1, initial2 = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial1)
        assert is_shared_variable(initial2)
        assert {initial1.name, initial2.name} == {
            'initial_state', 'initial_cells'}
class TestRecurrentStack(unittest.TestCase):
    """Compare a 4-layer LSTM RecurrentStack against a numpy reference.

    ``stack0`` feeds each layer's state into the next; ``stack2``
    additionally has skip connections from the input to every layer.
    Both single-step and 24-step (masked) applications are checked, with
    and without ``low_memory``.
    """
    def setUp(self):
        depth = 4
        self.depth = depth
        dim = 3
        transitions = [LSTM(dim=dim) for _ in range(depth)]
        self.stack0 = RecurrentStack(transitions,
                                     weights_init=Constant(2),
                                     biases_init=Constant(0))
        self.stack0.initialize()
        self.stack2 = RecurrentStack(transitions,
                                     weights_init=Constant(2),
                                     biases_init=Constant(0),
                                     skip_connections=True)
        self.stack2.initialize()
    def do_one_step(self, stack, skip_connections=False, low_memory=False):
        """Check one step of `stack` against a per-layer numpy LSTM update."""
        depth = self.depth
        # batch=2
        h0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
                                   dtype=theano.config.floatX)
        c0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth,
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([range(12), range(12, 24)],
                                  dtype=theano.config.floatX)
        # we will use same weights on all layers
        W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        # Build the keyword arguments layer by layer; layer d > 0 gets a
        # RECURRENTSTACK_SEPARATOR + str(d) suffix on its names.
        kwargs = OrderedDict()
        for d in range(depth):
            if d > 0:
                suffix = RECURRENTSTACK_SEPARATOR + str(d)
            else:
                suffix = ''
            if d == 0 or skip_connections:
                kwargs['inputs' + suffix] = tensor.matrix('inputs' + suffix)
                kwargs['inputs' + suffix].tag.test_value = x_val
            kwargs['states' + suffix] = tensor.matrix('states' + suffix)
            kwargs['states' + suffix].tag.test_value = h0_val[d]
            kwargs['cells' + suffix] = tensor.matrix('cells' + suffix)
            kwargs['cells' + suffix].tag.test_value = c0_val[d]
        results = stack.apply(iterate=False, low_memory=low_memory, **kwargs)
        next_h = theano.function(inputs=list(kwargs.values()),
                                 outputs=results)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        h1_val = []
        x_v = x_val
        args_val = []
        for d in range(depth):
            if d == 0 or skip_connections:
                args_val.append(x_val)
            h0_v = h0_val[d]
            args_val.append(h0_v)
            c0_v = c0_val[d]
            args_val.append(c0_v)
            # omitting biases because they are zero
            activation = numpy.dot(h0_v, W_state_val) + x_v
            if skip_connections and d > 0:
                activation += x_val
            i_t = sigmoid(activation[:, :3] + c0_v * W_cell_to_in)
            f_t = sigmoid(activation[:, 3:6] + c0_v * W_cell_to_forget)
            next_cells = f_t * c0_v + i_t * numpy.tanh(activation[:, 6:9])
            o_t = sigmoid(activation[:, 9:12] +
                          next_cells * W_cell_to_out)
            h1_v = o_t * numpy.tanh(next_cells)
            # current layer output state transformed to input of next
            x_v = numpy.dot(h1_v, W_state2x_val)
            h1_val.append(h1_v)
        res = next_h(*args_val)
        # Results alternate (state, cells) per layer; compare the states.
        for d in range(depth):
            assert_allclose(h1_val[d], res[d * 2], rtol=1e-6)
    def test_one_step(self):
        self.do_one_step(self.stack0)
        self.do_one_step(self.stack0, low_memory=True)
        self.do_one_step(self.stack2, skip_connections=True)
        self.do_one_step(self.stack2, skip_connections=True, low_memory=True)
    def do_many_steps(self, stack, skip_connections=False, low_memory=False):
        """Check 24 masked steps of `stack` against a full numpy unroll."""
        depth = self.depth
        # 24 steps
        # 4 batch examples
        # 12 dimensions per step
        x_val = (0.1 * numpy.asarray(
            list(itertools.islice(itertools.permutations(range(12)), 0, 24)),
            dtype=theano.config.floatX))
        x_val = numpy.ones((24, 4, 12),
                           dtype=theano.config.floatX) * x_val[:, None, :]
        # mask the last third of steps
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # unroll all states and cells for all steps and also initial value
        h_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
        c_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX)
        # we will use same weights on all layers
        W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        kwargs = OrderedDict()
        for d in range(depth):
            if d > 0:
                suffix = RECURRENTSTACK_SEPARATOR + str(d)
            else:
                suffix = ''
            if d == 0 or skip_connections:
                kwargs['inputs' + suffix] = tensor.tensor3('inputs' + suffix)
                kwargs['inputs' + suffix].tag.test_value = x_val
        kwargs['mask'] = tensor.matrix('mask')
        kwargs['mask'].tag.test_value = mask_val
        results = stack.apply(iterate=True, low_memory=low_memory, **kwargs)
        calc_h = theano.function(inputs=list(kwargs.values()),
                                 outputs=results)
        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))
        for i in range(1, 25):
            x_v = x_val[i - 1]
            h_vs = []
            c_vs = []
            for d in range(depth):
                h_v = h_val[d][i - 1, :, :]
                c_v = c_val[d][i - 1, :, :]
                activation = numpy.dot(h_v, W_state_val) + x_v
                if skip_connections and d > 0:
                    activation += x_val[i - 1]
                i_t = sigmoid(activation[:, :3] + c_v * W_cell_to_in)
                f_t = sigmoid(activation[:, 3:6] + c_v * W_cell_to_forget)
                c_v1 = f_t * c_v + i_t * numpy.tanh(activation[:, 6:9])
                o_t = sigmoid(activation[:, 9:12] +
                              c_v1 * W_cell_to_out)
                h_v1 = o_t * numpy.tanh(c_v1)
                # Masked entries carry over the previous state and cells.
                h_v = (mask_val[i - 1, :, None] * h_v1 +
                       (1 - mask_val[i - 1, :, None]) * h_v)
                c_v = (mask_val[i - 1, :, None] * c_v1 +
                       (1 - mask_val[i - 1, :, None]) * c_v)
                # current layer output state transformed to input of next
                x_v = numpy.dot(h_v, W_state2x_val)
                h_vs.append(h_v)
                c_vs.append(c_v)
            for d in range(depth):
                h_val[d][i, :, :] = h_vs[d]
                c_val[d][i, :, :] = c_vs[d]
        args_val = [x_val]*(depth if skip_connections else 1) + [mask_val]
        res = calc_h(*args_val)
        for d in range(depth):
            assert_allclose(h_val[d][1:], res[d * 2], rtol=1e-4)
            assert_allclose(c_val[d][1:], res[d * 2 + 1], rtol=1e-4)
        # Also test that initial state is a parameter
        for h in results:
            initial_states = VariableFilter(roles=[INITIAL_STATE])(
                ComputationGraph(h))
            assert all(is_shared_variable(initial_state)
                       for initial_state in initial_states)
    def test_many_steps(self):
        self.do_many_steps(self.stack0)
        self.do_many_steps(self.stack0, low_memory=True)
        self.do_many_steps(self.stack2, skip_connections=True)
        self.do_many_steps(self.stack2, skip_connections=True, low_memory=True)
class TestGatedRecurrent(unittest.TestCase):
    """Compare GatedRecurrent against a numpy reference implementation.

    Both bricks use Tanh for the gate activation, so the gates in the
    reference computations below are tanh rather than the usual sigmoid.
    """
    def setUp(self):
        self.gated = GatedRecurrent(
            dim=3, activation=Tanh(),
            gate_activation=Tanh(), weights_init=Constant(2))
        self.gated.initialize()
        self.reset_only = GatedRecurrent(
            dim=3, activation=Tanh(),
            gate_activation=Tanh(),
            weights_init=IsotropicGaussian(), seed=1)
        self.reset_only.initialize()
    def test_one_step(self):
        """A single gated step must match the closed-form numpy update."""
        h0 = tensor.matrix('h0')
        x = tensor.matrix('x')
        gi = tensor.matrix('gi')
        h1 = self.gated.apply(x, gi, h0, iterate=False)
        next_h = theano.function(inputs=[h0, x, gi], outputs=[h1])
        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
                                  dtype=theano.config.floatX)
        zi_val = (h0_val + x_val) / 2
        ri_val = -x_val
        W_val = 2 * numpy.ones((3, 3), dtype=theano.config.floatX)
        # Update (z) and reset (r) gates; gate_activation is Tanh.
        z_val = numpy.tanh(h0_val.dot(W_val) + zi_val)
        r_val = numpy.tanh(h0_val.dot(W_val) + ri_val)
        h1_val = (z_val * numpy.tanh((r_val * h0_val).dot(W_val) + x_val) +
                  (1 - z_val) * h0_val)
        # The gate inputs are passed fused: [update, reset] concatenated.
        assert_allclose(
            h1_val, next_h(h0_val, x_val, numpy.hstack([zi_val, ri_val]))[0],
            rtol=1e-6)
    def test_many_steps(self):
        """Iterating 24 masked steps must match the unrolled numpy loop."""
        x = tensor.tensor3('x')
        gi = tensor.tensor3('gi')
        mask = tensor.matrix('mask')
        h = self.reset_only.apply(x, gi, mask=mask)
        calc_h = theano.function(inputs=[x, gi, mask], outputs=[h])
        x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                    dtype=theano.config.floatX)
        x_val = numpy.ones((24, 4, 3),
                           dtype=theano.config.floatX) * x_val[..., None]
        ri_val = 0.3 - x_val
        zi_val = 2 * ri_val
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        # Row 0 is the (zero) initial state; steps fill rows 1..24.
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        W = self.reset_only.state_to_state.get_value()
        Wz = self.reset_only.state_to_gates.get_value()[:, :3]
        Wr = self.reset_only.state_to_gates.get_value()[:, 3:]
        for i in range(1, 25):
            z_val = numpy.tanh(h_val[i - 1].dot(Wz) + zi_val[i - 1])
            r_val = numpy.tanh(h_val[i - 1].dot(Wr) + ri_val[i - 1])
            h_val[i] = numpy.tanh((r_val * h_val[i - 1]).dot(W) +
                                  x_val[i - 1])
            h_val[i] = z_val * h_val[i] + (1 - z_val) * h_val[i - 1]
            # Masked batch entries carry over the previous state.
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
        h_val = h_val[1:]
        # TODO Figure out why this tolerance needs to be so big
        assert_allclose(
            h_val,
            calc_h(x_val, numpy.concatenate(
                [zi_val, ri_val], axis=2), mask_val)[0],
            1e-04)
        # Also test that initial state is a parameter
        initial_state, = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial_state)
        assert initial_state.name == 'initial_state'
class TestBidirectional(unittest.TestCase):
    """A Bidirectional wrapper must equal two SimpleRecurrent passes.

    The forward half of the output is compared with a plain pass and the
    backward half with a pass over the time-reversed input.
    """
    def setUp(self):
        self.bidir = Bidirectional(weights_init=Orthogonal(),
                                   prototype=SimpleRecurrent(
                                       dim=3, activation=Tanh()))
        self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                      activation=Tanh(), seed=1)
        self.bidir.allocate()
        self.simple.initialize()
        # Copy the stand-alone network's weights into both directions of
        # the wrapper so the outputs are directly comparable.
        self.bidir.children[0].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.bidir.children[1].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=theano.config.floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                      self.x_val[..., None])
        # Mask out the second half of the steps for the last batch entry.
        self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        self.mask_val[12:24, 3] = 0
    def test(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        calc_bidir = theano.function([x, mask],
                                     [self.bidir.apply(x, mask=mask)])
        calc_simple = theano.function([x, mask],
                                      [self.simple.apply(x, mask=mask)])
        h_bidir = calc_bidir(self.x_val, self.mask_val)[0]
        h_simple = calc_simple(self.x_val, self.mask_val)[0]
        h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]
        output_names = self.bidir.apply.outputs
        assert output_names == ['states']
        # First 3 features: forward pass; last 3: reversed pass.
        assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)
        assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)
class TestBidirectionalStack(unittest.TestCase):
    """A stack of Bidirectional layers must match applying them one by one.

    Each layer of the RecurrentStack is compared against a stand-alone
    application of the same Bidirectional brick, feeding the forward half
    of each layer's output into the next.
    """
    def setUp(self):
        prototype = SimpleRecurrent(dim=3, activation=Tanh())
        self.layers = [
            Bidirectional(weights_init=Orthogonal(), prototype=prototype)
            for _ in range(3)]
        self.stack = RecurrentStack(self.layers)
        # Identity forks with zero biases make the inter-layer transforms
        # transparent, so layer outputs can be compared directly.
        for fork in self.stack.forks:
            fork.weights_init = Identity(1)
            fork.biases_init = Constant(0)
        self.stack.initialize()
        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=theano.config.floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                      self.x_val[..., None])
        self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        self.mask_val[12:24, 3] = 0
    def test_steps(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        calc_stack_layers = [
            theano.function([x, mask], self.stack.apply(x, mask=mask)[i])
            for i in range(len(self.layers))]
        stack_layers = [
            f(self.x_val, self.mask_val) for f in calc_stack_layers]
        h_val = self.x_val
        for stack_layer_value, bidir_net in zip(stack_layers, self.layers):
            calc = theano.function([x, mask], bidir_net.apply(x, mask=mask))
            simple_layer_value = calc(h_val, self.mask_val)
            assert_allclose(stack_layer_value, simple_layer_value, rtol=1e-04)
            # Only the forward half feeds the next layer.
            h_val = simple_layer_value[..., :3]
    def test_dims(self):
        self.assertEqual(self.stack.get_dim("inputs"), 3)
        for i in range(len(self.layers)):
            state_name = self.stack.suffix("states", i)
            # Bidirectional layers concatenate both directions: 2 * dim.
            self.assertEqual(self.stack.get_dim(state_name), 6)
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    inp = tensor.tensor3()
    brick = SimpleRecurrent(dim=3, activation=Tanh())
    out = brick.apply(inp)
    call = get_application_call(out)
    assert call.inner_inputs
    assert call.inner_outputs
    inner_cg = ComputationGraph(call.inner_outputs)
    # The inner scan graph must be annotated with `brick.apply`.
    annotated = VariableFilter(applications=[brick.apply])(inner_cg)
    assert len(annotated) == 3
    # The saved inner graph must be equivalent to a stand-alone,
    # non-iterating application of `brick.apply`.
    standalone = brick.apply(*call.inner_inputs, iterate=False)
    assert is_same_graph(call.inner_outputs[0], standalone)
def test_super_in_recurrent_overrider():
    # A regression test for the issue #475
    class SimpleRecurrentWithContext(SimpleRecurrent):
        @application(contexts=['context'])
        def apply(self, context, *args, **kwargs):
            # Fold the context into the inputs, then delegate to the
            # parent implementation through super().
            kwargs['inputs'] += context
            return super(SimpleRecurrentWithContext, self).apply(*args,
                                                                 **kwargs)
        @apply.delegate
        def apply_delegate(self):
            # Signature and annotations are delegated to the parent apply.
            return super(SimpleRecurrentWithContext, self).apply
    brick = SimpleRecurrentWithContext(100, Tanh())
    inputs = tensor.tensor3('inputs')
    context = tensor.matrix('context').dimshuffle('x', 0, 1)
    # It suffices that this call does not raise.
    brick.apply(context, inputs=inputs)
| true | true |
f7fc45d78af97d852668c067ce6794119ebb9659 | 67,770 | py | Python | pandas/tests/series/test_operators.py | jackieleng/pandas | ccec504e31ce74f8016952ac75add1cc4bec7080 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tests/series/test_operators.py | jackieleng/pandas | ccec504e31ce74f8016952ac75add1cc4bec7080 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tests/series/test_operators.py | jackieleng/pandas | ccec504e31ce74f8016952ac75add1cc4bec7080 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 1 | 2021-01-02T02:27:25.000Z | 2021-01-02T02:27:25.000Z | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
    def test_div(self):
        """Division semantics: int/int promotes to float and x/0 yields
        signed infinities (nan for 0/0)."""
        with np.errstate(all='ignore'):
            # no longer do integer div for any ops, but deal with the 0's
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
            result = p['first'] / p['second']
            expected = Series(
                p['first'].values.astype(float) / p['second'].values,
                dtype='float64')
            expected.iloc[0:3] = np.inf
            assert_series_equal(result, expected)
            result = p['first'] / 0
            expected = Series(np.inf, index=p.index, name='first')
            assert_series_equal(result, expected)
            p = p.astype('float64')
            result = p['first'] / p['second']
            expected = Series(p['first'].values / p['second'].values)
            assert_series_equal(result, expected)
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
            result = p['first'] / p['second']
            assert_series_equal(result, p['first'].astype('float64'),
                                check_names=False)
            self.assertTrue(result.name is None)
            self.assertFalse(np.array_equal(result, p['second'] / p['first']))
            # inf signing
            s = Series([np.nan, 1., -1.])
            result = s / 0
            expected = Series([np.nan, np.inf, -np.inf])
            assert_series_equal(result, expected)
            # float/integer issue
            # GH 7785
            p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
            expected = Series([-0.01, -np.inf])
            result = p['second'].div(p['first'])
            assert_series_equal(result, expected, check_names=False)
            result = p['second'] / p['first']
            assert_series_equal(result, expected)
            # GH 9144
            s = Series([-1, 0, 1])
            result = 0 / s
            expected = Series([0.0, nan, 0.0])
            assert_series_equal(result, expected)
            result = s / 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
            # floor division by zero behaves like true division here
            result = s // 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
    def test_operators(self):
        """Arithmetic and comparison ops must agree with element-wise
        ``Series.combine`` using the same operator."""
        def _check_op(series, other, op, pos_only=False,
                      check_dtype=True):
            # pos_only guards ops like pow that need non-negative bases.
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other
            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_series_equal(cython_or_numpy, python,
                                   check_dtype=check_dtype)
        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))
            _check_op(series, other, operator.pow, pos_only=True)
            # Reflected variants via argument-swapping lambdas.
            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))
        check(self.ts, self.ts * 2)
        check(self.ts, self.ts * 0)
        check(self.ts, self.ts[::2])
        check(self.ts, 5)
        def check_comparators(series, other, check_dtype=True):
            _check_op(series, other, operator.gt, check_dtype=check_dtype)
            _check_op(series, other, operator.ge, check_dtype=check_dtype)
            _check_op(series, other, operator.eq, check_dtype=check_dtype)
            _check_op(series, other, operator.lt, check_dtype=check_dtype)
            _check_op(series, other, operator.le, check_dtype=check_dtype)
        check_comparators(self.ts, 5)
        check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
    def test_operators_timedelta64(self):
        """datetime64 arithmetic: dt - dt -> timedelta64, dt +/- td
        round-trips, and object Series reject numeric add/sub."""
        # invalid ops
        self.assertRaises(Exception, self.objSeries.__add__, 1)
        self.assertRaises(Exception, self.objSeries.__add__,
                          np.array(1, dtype=np.int64))
        self.assertRaises(Exception, self.objSeries.__sub__, 1)
        self.assertRaises(Exception, self.objSeries.__sub__,
                          np.array(1, dtype=np.int64))
        # series ops
        v1 = date_range('2012-1-1', periods=3, freq='D')
        v2 = date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        # one day expressed in nanoseconds
        xp = Series(1e9 * 3600 * 24,
                    rs.index).astype('int64').astype('timedelta64[ns]')
        assert_series_equal(rs, xp)
        self.assertEqual(rs.dtype, 'timedelta64[ns]')
        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')
        # series on the rhs
        result = df['A'] - df['A'].shift()
        self.assertEqual(result.dtype, 'timedelta64[ns]')
        result = df['A'] + td
        self.assertEqual(result.dtype, 'M8[ns]')
        # scalar Timestamp on rhs
        maxa = df['A'].max()
        tm.assertIsInstance(maxa, Timestamp)
        resultb = df['A'] - df['A'].max()
        self.assertEqual(resultb.dtype, 'timedelta64[ns]')
        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        assert_series_equal(result, expected)
        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series(
            [timedelta(days=4017 + i) for i in range(3)], name='A')
        assert_series_equal(result, expected)
        self.assertEqual(result.dtype, 'm8[ns]')
        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        self.assertEqual(resulta.dtype, 'm8[ns]')
        # roundtrip
        resultb = resulta + d
        assert_series_equal(df['A'], resultb)
        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(resultb, df['A'])
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(df['A'], resultb)
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
    def test_timedeltas_with_DateOffset(self):
        """datetime64 Series arithmetic with pd.offsets and np.timedelta64 (GH 4532).

        Checks both operand orders (offset + series, series + offset) and
        chained offsets produce the exact expected timestamps.
        """
        # operate with pd.offsets
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        result = s + pd.offsets.Second(5)
        result2 = pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
            '20130101 9:02:05')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # subtraction, and the equivalent negated-offset addition
        result = s - pd.offsets.Second(5)
        result2 = -pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
            '20130101 9:01:55')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # chained offsets accumulate
        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
            '20130101 9:07:00.005')])
        assert_series_equal(result, expected)
        # operate with np.timedelta64 correctly
        result = s + np.timedelta64(1, 's')
        result2 = np.timedelta64(1, 's') + s
        expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
            '20130101 9:02:01')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + np.timedelta64(5, 'ms')
        result2 = np.timedelta64(5, 'ms') + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # valid DateOffsets: smoke-test that both operand orders are accepted
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            s + op(5)
            op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
    def test_timedelta64_operations_with_DateOffset(self):
        """timedelta64 Series arithmetic with DateOffset objects (GH 10699).

        Covers scalar offsets, a Series of heterogeneous offsets, and chained
        offsets; then smoke-tests +/- in both operand orders for each offset
        type.
        """
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        assert_series_equal(result, expected)
        # an object Series of offsets applies elementwise
        result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                              pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3), timedelta(
            minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
        assert_series_equal(result, expected)
        # chained offsets accumulate
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        assert_series_equal(result, expected)
        # valid DateOffsets: smoke-test all four operand orderings
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
    def test_timedelta64_operations_with_timedeltas(self):
        """timedelta64 Series +/- timedelta scalars, both stdlib and pandas flavors.

        The second half repeats the checks with pd.to_timedelta-built
        operands to confirm both construction paths behave identically.
        """
        # td operate with td
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td2 = timedelta(minutes=5, seconds=4)
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
        # Now again, using pd.to_timedelta, which should build
        # a Series or a scalar, depending on input.
        td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
        td2 = pd.to_timedelta('00:05:04')
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
    def test_timedelta64_operations_with_integers(self):
        """Multiply/divide a timedelta64 Series by integers (GH 4521).

        NaT propagates through the arithmetic (s1[2] is set to NaT via
        np.nan); integer dtypes of any width must behave like int64.
        Addition/subtraction of plain integers must raise TypeError.
        """
        # divide/multiply by integers
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        s1[2] = np.nan
        s2 = Series([2, 3, 4])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        result = s1 / 2
        expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 * s2
        assert_series_equal(result, expected)
        # NOTE(review): 'uint32' appears twice in this list, which looks like
        # an unintentional duplicate rather than a deliberate repeat.
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
                      'uint8']:
            s2 = Series([20, 30, 40], dtype=dtype)
            expected = Series(
                s1.values.astype(np.int64) * s2.astype(np.int64),
                dtype='m8[ns]')
            expected[2] = np.nan
            result = s1 * s2
            assert_series_equal(result, expected)
        result = s1 * 2
        expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        result = s1 * -1
        expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        # float division is allowed and yields Timedelta results
        assert_series_equal(s1 / s2.astype(float),
                            Series([Timedelta('2 days 22:48:00'), Timedelta(
                                '1 days 23:12:00'), Timedelta('NaT')]))
        assert_series_equal(s1 / 2.0,
                            Series([Timedelta('29 days 12:00:00'), Timedelta(
                                '29 days 12:00:00'), Timedelta('NaT')]))
        # invalid ops: adding/subtracting bare integers must raise
        for op in ['__add__', '__sub__']:
            sop = getattr(s1, op, None)
            if sop is not None:
                self.assertRaises(TypeError, sop, 1)
                self.assertRaises(TypeError, sop, s2.values)
    def test_timedelta64_conversions(self):
        """Division by np.timedelta64 and astype to coarser timedelta units.

        For each magnitude/unit combination, dividing by the unit and casting
        to that unit must agree; also checks astype(object) yields stdlib
        datetime/timedelta instances.
        """
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        s1[2] = np.nan
        for m in [1, 3, 10]:
            for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
                # op
                expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
                result = s1 / np.timedelta64(m, unit)
                assert_series_equal(result, expected)
                if m == 1 and unit != 'ns':
                    # astype to the unit must match division by one unit
                    result = s1.astype("timedelta64[{0}]".format(unit))
                    assert_series_equal(result, expected)
                # reverse op
                expected = s1.apply(
                    lambda x: Timedelta(np.timedelta64(m, unit)) / x)
                result = np.timedelta64(m, unit) / s1
        # astype(object) boxes values as stdlib datetime / timedelta
        s = Series(date_range('20130101', periods=3))
        result = s.astype(object)
        self.assertIsInstance(result.iloc[0], datetime)
        self.assertTrue(result.dtype == np.object_)
        result = s1.astype(object)
        self.assertIsInstance(result.iloc[0], timedelta)
        self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
    def test_operators_datetimelike(self):
        """Which arithmetic ops are defined across td64/dt64/tz-aware operands.

        run_ops asserts that unsupported dunders raise TypeError; the bare
        expressions afterwards are smoke checks that the supported ops do not
        raise. The tz-aware section also pins exact results (GH 8260, 10763).
        """
        def run_ops(ops, get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            for op_str in ops:
                op = getattr(get_ser, op_str, None)
                with tm.assertRaisesRegexp(TypeError, 'operate'):
                    op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td2 = timedelta(minutes=5, seconds=4)
        ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
               '__rfloordiv__', '__rpow__']
        run_ops(ops, td1, td2)
        # supported ops must not raise
        td1 + td2
        td2 + td1
        td1 - td2
        td2 - td1
        td1 / td2
        td2 / td1
        # ## datetime64 ###
        dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                      Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                      Timestamp('20120104')])
        ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__radd__', '__rmul__', '__rfloordiv__',
               '__rtruediv__', '__rdiv__', '__rpow__']
        run_ops(ops, dt1, dt2)
        dt1 - dt2
        dt2 - dt1
        # ## datetime64 with timetimedelta ###
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        run_ops(ops, dt1, td1)
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1
        # ## timetimedelta with datetime64 ###
        ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
               '__rdiv__', '__rpow__']
        run_ops(ops, td1, dt1)
        td1 + dt1
        dt1 + td1
        # 8260, 10763
        # datetime64 with tz
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        run_ops(ops, dt1, td1)
        # scalar timedelta: result equals localize-free arithmetic re-localized
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        if not _np_version_under1p8:
            result = td1[0] + dt1
            exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
            result = td2[0] + dt2
            exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td1[0] - dt1)
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td2[0] - dt2)
        # Series timedelta operands
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td1 - dt1)
        self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
    def test_ops_nat(self):
        """NaT propagation through -, +, * and / for td64/dt64 Series (GH 11349).

        Exhaustively pairs NaT (bare scalar and 1-element typed Series) with
        timedelta64 and datetime64 Series, pinning both the all-NaT results
        and the TypeErrors for undefined combinations.
        """
        timedelta_series = Series([NaT, Timedelta('1s')])
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timedelta = Series(
            [NaT, NaT], dtype='timedelta64[ns]')
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
        single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
        # subtraction
        assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
        assert_series_equal(-NaT + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
        assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        assert_series_equal(datetime_series - single_nat_dtype_datetime,
                            nat_series_dtype_timedelta)
        # cannot add a (negated) datetime64 to a datetime64
        with tm.assertRaises(TypeError):
            -single_nat_dtype_datetime + datetime_series
        assert_series_equal(datetime_series - single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
                            nat_series_dtype_timestamp)
        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        assert_series_equal(nat_series_dtype_timestamp - NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(-NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp -
                            single_nat_dtype_datetime,
                            nat_series_dtype_timedelta)
        with tm.assertRaises(TypeError):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp
        assert_series_equal(nat_series_dtype_timestamp -
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(-single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        with tm.assertRaises(TypeError):
            timedelta_series - single_nat_dtype_datetime
        # addition
        assert_series_equal(nat_series_dtype_timestamp + NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timedelta + NaT,
                            nat_series_dtype_timedelta)
        assert_series_equal(NaT + nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
        assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timestamp + NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timedelta + NaT,
                            nat_series_dtype_timedelta)
        assert_series_equal(NaT + nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        # td64 + dt64 produces a datetime64 result
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_datetime,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_datetime +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timestamp)
        # multiplication
        assert_series_equal(nat_series_dtype_timedelta * 1.0,
                            nat_series_dtype_timedelta)
        assert_series_equal(1.0 * nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series * 1, timedelta_series)
        assert_series_equal(1 * timedelta_series, timedelta_series)
        assert_series_equal(timedelta_series * 1.5,
                            Series([NaT, Timedelta('1.5s')]))
        assert_series_equal(1.5 * timedelta_series,
                            Series([NaT, Timedelta('1.5s')]))
        assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
        assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
        # datetimes cannot be multiplied at all
        with tm.assertRaises(TypeError):
            datetime_series * 1
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp * 1
        with tm.assertRaises(TypeError):
            datetime_series * 1.0
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp * 1.0
        # division
        assert_series_equal(timedelta_series / 2,
                            Series([NaT, Timedelta('0.5s')]))
        assert_series_equal(timedelta_series / 2.0,
                            Series([NaT, Timedelta('0.5s')]))
        assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
        # datetimes cannot be divided either
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp / 1.0
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
    def test_comparison_tuples(self):
        """Comparing an object Series of tuples against a tuple scalar (GH11339).

        The tuple on the right is treated as a single scalar value, not as a
        list-like to broadcast elementwise.
        """
        s = Series([(1, 1), (1, 2)])
        result = s == (1, 2)
        expected = Series([False, True])
        assert_series_equal(result, expected)
        result = s != (1, 2)
        expected = Series([True, False])
        assert_series_equal(result, expected)
        result = s == (0, 0)
        expected = Series([False, False])
        assert_series_equal(result, expected)
        result = s != (0, 0)
        expected = Series([True, True])
        assert_series_equal(result, expected)
        s = Series([(1, 1), (1, 1)])
        result = s == (1, 1)
        expected = Series([True, True])
        assert_series_equal(result, expected)
        result = s != (1, 1)
        expected = Series([False, False])
        assert_series_equal(result, expected)
        # the same scalar treatment applies to frozensets
        s = Series([frozenset([1]), frozenset([1, 2])])
        result = s == frozenset([1])
        expected = Series([True, False])
        assert_series_equal(result, expected)
    def test_comparison_operators_with_nas(self):
        """Comparisons and boolean ops on object Series with missing entries.

        NaN slots must compare False (True only for !=), and &, |, ^ must
        propagate missing values as False.
        """
        s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
        s[::2] = np.nan
        # test that comparisons work
        ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
        for op in ops:
            val = s[5]
            f = getattr(operator, op)
            result = f(s, val)
            expected = f(s.dropna(), val).reindex(s.index)
            if op == 'ne':
                # NaN != anything is True; all other comparisons give False
                expected = expected.fillna(True).astype(bool)
            else:
                expected = expected.fillna(False).astype(bool)
            assert_series_equal(result, expected)
        # TODO: reversed comparisons (scalar on the left) are not yet
        # consistent with the forward direction — left disabled:
        # result = f(val, s)
        # expected = f(val, s.dropna()).reindex(s.index)
        # assert_series_equal(result, expected)
        # boolean &, |, ^ should work with object arrays and propagate NAs
        ops = ['and_', 'or_', 'xor']
        mask = s.isnull()
        for bool_op in ops:
            f = getattr(operator, bool_op)
            filled = s.fillna(s[0])
            result = f(s < s[9], s > s[3])
            expected = f(filled < filled[9], filled > filled[3])
            # positions that were NaN in s must come out False
            expected[mask] = False
            assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
    def test_nat_comparisons(self):
        """Series-vs-Series/Index comparisons where one side contains NaT.

        Covers Timestamp, Timedelta and Period data; NaT slots compare False
        for every operator except !=, symmetrically in both operand orders.
        """
        data = [([pd.Timestamp('2011-01-01'), pd.NaT,
                  pd.Timestamp('2011-01-03')],
                 [pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
                ([pd.Timedelta('1 days'), pd.NaT,
                  pd.Timedelta('3 days')],
                 [pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
                ([pd.Period('2011-01', freq='M'), pd.NaT,
                  pd.Period('2011-03', freq='M')],
                 [pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
        # add lhs / rhs switched data
        data = data + [(r, l) for l, r in data]
        for l, r in data:
            for dtype in [None, object]:
                left = Series(l, dtype=dtype)
                # Series, Index
                for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
                    # only position 2 has non-NaT on both sides and is equal
                    expected = Series([False, False, True])
                    assert_series_equal(left == right, expected)
                    expected = Series([True, True, False])
                    assert_series_equal(left != right, expected)
                    expected = Series([False, False, False])
                    assert_series_equal(left < right, expected)
                    expected = Series([False, False, False])
                    assert_series_equal(left > right, expected)
                    expected = Series([False, False, True])
                    assert_series_equal(left >= right, expected)
                    expected = Series([False, False, True])
                    assert_series_equal(left <= right, expected)
    def test_nat_comparisons_scalar(self):
        """Series compared against the bare NaT scalar, both operand orders.

        Every comparison with NaT is False except !=, which is always True;
        holds for Timestamp, Timedelta and Period data.
        """
        data = [[pd.Timestamp('2011-01-01'), pd.NaT,
                 pd.Timestamp('2011-01-03')],
                [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
                [pd.Period('2011-01', freq='M'), pd.NaT,
                 pd.Period('2011-03', freq='M')]]
        for l in data:
            for dtype in [None, object]:
                left = Series(l, dtype=dtype)
                expected = Series([False, False, False])
                assert_series_equal(left == pd.NaT, expected)
                assert_series_equal(pd.NaT == left, expected)
                expected = Series([True, True, True])
                assert_series_equal(left != pd.NaT, expected)
                assert_series_equal(pd.NaT != left, expected)
                # all orderings against NaT are False, in either direction
                expected = Series([False, False, False])
                assert_series_equal(left < pd.NaT, expected)
                assert_series_equal(pd.NaT > left, expected)
                assert_series_equal(left <= pd.NaT, expected)
                assert_series_equal(pd.NaT >= left, expected)
                assert_series_equal(left > pd.NaT, expected)
                assert_series_equal(pd.NaT < left, expected)
                assert_series_equal(left >= pd.NaT, expected)
                assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
    def test_comparison_label_based(self):
        """Boolean &, |, ^ between Series align on labels, not position (GH 4947).

        Also pins the fill behavior against empty / non-overlapping operands
        and against scalar right-hand sides.
        """
        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False], list('abc'))
        expected = Series([False, True, False], list('abc'))
        result = a & b
        assert_series_equal(result, expected)
        expected = Series([True, True, False], list('abc'))
        result = a | b
        assert_series_equal(result, expected)
        expected = Series([True, False, False], list('abc'))
        result = a ^ b
        assert_series_equal(result, expected)
        # rhs is bigger
        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False, True], list('abcd'))
        expected = Series([False, True, False, False], list('abcd'))
        result = a & b
        assert_series_equal(result, expected)
        expected = Series([True, True, False, False], list('abcd'))
        result = a | b
        assert_series_equal(result, expected)
        # filling
        # vs empty: & fills missing with False, | keeps a's values
        result = a & Series([])
        expected = Series([False, False, False], list('bca'))
        assert_series_equal(result, expected)
        result = a | Series([])
        expected = Series([True, False, True], list('bca'))
        assert_series_equal(result, expected)
        # vs non-matching: union index, non-overlapping labels become False
        result = a & Series([1], ['z'])
        expected = Series([False, False, False, False], list('abcz'))
        assert_series_equal(result, expected)
        result = a | Series([1], ['z'])
        expected = Series([True, True, False, False], list('abcz'))
        assert_series_equal(result, expected)
        # identity
        # we would like s[s|e] == s to hold for any e, whether empty or not
        for e in [Series([]), Series([1], ['z']),
                  Series(np.nan, b.index), Series(np.nan, a.index)]:
            result = a[a | e]
            assert_series_equal(result, a[a])
        for e in [Series(['z'])]:
            # object-dtype rhs warns on Python 3 (incomparable index types)
            if compat.PY3:
                with tm.assert_produces_warning(RuntimeWarning):
                    result = a[a | e]
            else:
                result = a[a | e]
            assert_series_equal(result, a[a])
        # vs scalars
        index = list('bca')
        t = Series([True, False, True])
        for v in [True, 1, 2]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, True, True], index=index)
            assert_series_equal(result, expected)
        for v in [np.nan, 'foo']:
            self.assertRaises(TypeError, lambda: t | v)
        for v in [False, 0]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)
        for v in [True, 1]:
            result = Series([True, False, True], index=index) & v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)
        for v in [False, 0]:
            result = Series([True, False, True], index=index) & v
            expected = Series([False, False, False], index=index)
            assert_series_equal(result, expected)
        for v in [np.nan]:
            self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left < right)
tm.assert_series_equal(left.lt(right), left <= right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left < right)
tm.assert_series_equal(left.lt(right, axis=axis), left <= right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
    def test_operators_bitwise(self):
        """Bitwise &, |, ^ across bool and integer Series (GH 9016).

        Integer operands produce integer bitwise results; bool/int mixes and
        misaligned indexes reindex to the union and fill with False; invalid
        operand types raise TypeError.
        """
        index = list('bca')
        s_tft = Series([True, False, True], index=index)
        s_fff = Series([False, False, False], index=index)
        s_tff = Series([True, False, False], index=index)
        s_empty = Series([])
        s_0123 = Series(range(4), dtype='int64')
        s_3333 = Series([3] * 4)
        s_4444 = Series([4] * 4)
        # bool vs empty: & fills with False, | keeps the lhs
        res = s_tft & s_empty
        expected = s_fff
        assert_series_equal(res, expected)
        res = s_tft | s_empty
        expected = s_tft
        assert_series_equal(res, expected)
        # integer vs integer is true bitwise arithmetic
        res = s_0123 & s_3333
        expected = Series(range(4), dtype='int64')
        assert_series_equal(res, expected)
        res = s_0123 | s_4444
        expected = Series(range(4, 8), dtype='int64')
        assert_series_equal(res, expected)
        s_a0b1c0 = Series([1], list('b'))
        res = s_tft & s_a0b1c0
        expected = s_tff.reindex(list('abc'))
        assert_series_equal(res, expected)
        res = s_tft | s_a0b1c0
        expected = s_tft.reindex(list('abc'))
        assert_series_equal(res, expected)
        # scalar integer operands
        n0 = 0
        res = s_tft & n0
        expected = s_fff
        assert_series_equal(res, expected)
        res = s_0123 & n0
        expected = Series([0] * 4)
        assert_series_equal(res, expected)
        n1 = 1
        res = s_tft & n1
        expected = s_tft
        assert_series_equal(res, expected)
        res = s_0123 & n1
        expected = Series([0, 1, 0, 1])
        assert_series_equal(res, expected)
        # mixed integer widths upcast per numpy rules
        s_1111 = Series([1] * 4, dtype='int8')
        res = s_0123 & s_1111
        expected = Series([0, 1, 0, 1], dtype='int64')
        assert_series_equal(res, expected)
        res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
        expected = Series([1, 1, 3, 3], dtype='int32')
        assert_series_equal(res, expected)
        # non-boolean / non-integer operands are rejected
        self.assertRaises(TypeError, lambda: s_1111 & 'a')
        self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
        self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
        self.assertRaises(TypeError, lambda: s_0123 & 3.14)
        self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
        # s_0123 will be all false now because of reindexing like s_tft
        if compat.PY3:
            # unable to sort incompatible object via .union.
            exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
            with tm.assert_produces_warning(RuntimeWarning):
                assert_series_equal(s_tft & s_0123, exp)
        else:
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
            assert_series_equal(s_tft & s_0123, exp)
        # s_tft will be all false now because of reindexing like s_0123
        if compat.PY3:
            # unable to sort incompatible object via .union.
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
            with tm.assert_produces_warning(RuntimeWarning):
                assert_series_equal(s_0123 & s_tft, exp)
        else:
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
            assert_series_equal(s_0123 & s_tft, exp)
        assert_series_equal(s_0123 & False, Series([False] * 4))
        assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
        assert_series_equal(s_0123 & [False], Series([False] * 4))
        assert_series_equal(s_0123 & (False), Series([False] * 4))
        assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
                            Series([False] * 4))
        s_ftft = Series([False, True, False, True])
        assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
        # object operand: truthiness of each element decides the result
        s_abNd = Series(['a', 'b', np.NaN, 'd'])
        res = s_0123 & s_abNd
        expected = s_ftft
        assert_series_equal(res, expected)
    def test_scalar_na_cmp_corners(self):
        """Corner cases for ``&`` between a Series and scalars / NaN data."""
        s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
        def tester(a, b):
            return a & b
        # a datetime scalar on the rhs of & is not a valid boolean operand
        self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
        s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
        s[::2] = np.nan
        # NaN positions come out False in the boolean result
        expected = Series(True, index=s.index)
        expected[::2] = False
        assert_series_equal(tester(s, list(s)), expected)
        d = DataFrame({'A': s})
        # TODO: Fix this exception - needs to be fixed! (see GH5035)
        # (previously this was a TypeError because series returned
        # NotImplemented
        # this is an alignment issue; these are equivalent
        # https://github.com/pydata/pandas/issues/5284
        self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
        self.assertRaises(ValueError, tester, s, d)
        # this is wrong as its not a boolean result
        # result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
    def test_bool_ops_df_compat(self):
        """GH 1134: boolean & / | on misaligned Series fill NaN with False,
        while the DataFrame versions keep NaN (asymmetric behavior)."""
        # GH 1134
        s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
        s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
        exp = pd.Series([True, False, False, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 & s2, exp)
        tm.assert_series_equal(s2 & s1, exp)
        # True | np.nan => True
        exp = pd.Series([True, True, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 | s2, exp)
        # np.nan | True => np.nan, filled with False
        exp = pd.Series([True, True, False, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s2 | s1, exp)
        # DataFrame doesn't fill nan with False
        exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
        exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
        # different length
        s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
        s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
        exp = pd.Series([True, False, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 & s4, exp)
        tm.assert_series_equal(s4 & s3, exp)
        # np.nan | True => np.nan, filled with False
        exp = pd.Series([True, True, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 | s4, exp)
        # True | np.nan => True
        exp = pd.Series([True, True, True, True],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s4 | s3, exp)
        exp = pd.DataFrame({'x': [True, False, True, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
        exp = pd.DataFrame({'x': [True, True, True, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
    def test_series_radd_more(self):
        """Radd corner cases for Series: string prefix on mixed data raises;
        int/NaN/Timedelta radd broadcast across None and object dtypes."""
        data = [[1, 2, 3],
                [1.1, 2.2, 3.3],
                [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
                 pd.NaT],
                ['x', 'y', 1]]
        # 'foo_' + <non-all-string data> is not a valid concatenation
        for d in data:
            for dtype in [None, object]:
                s = Series(d, dtype=dtype)
                with tm.assertRaises(TypeError):
                    'foo_' + s
        for dtype in [None, object]:
            res = 1 + pd.Series([1, 2, 3], dtype=dtype)
            exp = pd.Series([2, 3, 4], dtype=dtype)
            tm.assert_series_equal(res, exp)
            res = pd.Series([1, 2, 3], dtype=dtype) + 1
            tm.assert_series_equal(res, exp)
            res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
            exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
            tm.assert_series_equal(res, exp)
            res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
            tm.assert_series_equal(res, exp)
            # scalar Timedelta radd/add on a timedelta Series
            s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
                           pd.Timedelta('3 days')], dtype=dtype)
            exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
                             pd.Timedelta('6 days')])
            tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
            tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
        # string concatenation skips NaN slots
        s = pd.Series(['x', np.nan, 'x'])
        tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
        tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_series_equal(self.ts + self.ts, self.ts + df['A'],
check_names=False)
tm.assert_series_equal(self.ts ** self.ts, self.ts ** df['A'],
check_names=False)
tm.assert_series_equal(self.ts < self.ts, self.ts < df['A'],
check_names=False)
tm.assert_series_equal(self.ts / self.ts, self.ts / df['A'],
check_names=False)
    def test_operators_combine(self):
        """Flex arithmetic methods (add/radd/.../div) with ``fill_value``
        match a hand-computed elementwise expectation on the aligned union."""
        def _check_fill(meth, op, a, b, fill_value=0):
            # expected: a one-sided NaN is replaced by fill_value before the
            # op; a both-sided NaN stays NaN
            exp_index = a.index.union(b.index)
            a = a.reindex(exp_index)
            b = b.reindex(exp_index)
            amask = isnull(a)
            bmask = isnull(b)
            exp_values = []
            for i in range(len(exp_index)):
                with np.errstate(all='ignore'):
                    if amask[i]:
                        if bmask[i]:
                            exp_values.append(nan)
                            continue
                        exp_values.append(op(fill_value, b[i]))
                    elif bmask[i]:
                        # NOTE(review): this nested amask check is unreachable
                        # (amask[i] is False in this elif branch)
                        if amask[i]:
                            exp_values.append(nan)
                            continue
                        exp_values.append(op(a[i], fill_value))
                    else:
                        exp_values.append(op(a[i], b[i]))
            result = meth(a, b, fill_value=fill_value)
            expected = Series(exp_values, exp_index)
            assert_series_equal(result, expected)
        a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
        b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
        # build (flex_method, equivalent_operator, fill_value) triples
        pairings = []
        for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
            fv = 0
            lop = getattr(Series, op)
            lequiv = getattr(operator, op)
            rop = getattr(Series, 'r' + op)
            # bind op at definition time...
            requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
            pairings.append((lop, lequiv, fv))
            pairings.append((rop, requiv, fv))
        if compat.PY3:
            pairings.append((Series.div, operator.truediv, 1))
            pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x),
                             1))
        else:
            pairings.append((Series.div, operator.div, 1))
            pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
        for op, equiv_op, fv in pairings:
            result = op(a, b)
            exp = equiv_op(a, b)
            assert_series_equal(result, exp)
            _check_fill(op, equiv_op, a, b, fill_value=fv)
            # should accept axis=0 or axis='rows'
            op(a, b, axis=0)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
self.assertTrue(tm.equalContents(ts.index != 5, expected))
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
self.assertTrue(isnull(result[0]))
self.assertTrue(isnull(result2[0]))
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
assert_series_equal(result, expected)
result = s + '_suffix'
expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
assert_series_equal(result, expected)
def test_divide_decimal(self):
""" resolves issue #9787 """
from decimal import Decimal
expected = Series([Decimal(5)])
s = Series([Decimal(10)])
s = s / Decimal(2)
tm.assert_series_equal(expected, s)
s = Series([Decimal(10)])
s = s // Decimal(2)
tm.assert_series_equal(expected, s)
    def test_datetime64_with_index(self):
        """Series minus its own Index behaves like minus index.to_series()."""
        # arithmetic integer ops with an index
        s = Series(np.random.randn(5))
        expected = s - s.index.to_series()
        result = s - s.index
        assert_series_equal(result, expected)
        # GH 4629
        # arithmetic datetime64 ops with an index
        s = Series(date_range('20130101', periods=5),
                   index=date_range('20130101', periods=5))
        expected = s - s.index.to_series()
        result = s - s.index
        assert_series_equal(result, expected)
        # subtracting the PeriodIndex view gives the same deltas
        result = s - s.index.to_period()
        assert_series_equal(result, expected)
        df = DataFrame(np.random.randn(5, 2),
                       index=date_range('20130101', periods=5))
        df['date'] = Timestamp('20130102')
        df['expected'] = df['date'] - df.index.to_series()
        df['result'] = df['date'] - df.index
        assert_series_equal(df['result'], df['expected'], check_names=False)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
assert_series_equal(res, Series([np.nan, 3, np.nan], index=base))
# NOTE(review): a stray dataset-metadata row ("| 38.22335 | 79 | 0.554877 |")
# was embedded here by a data-extraction artifact; it is not valid Python and
# has been commented out.
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
    def test_div(self):
        """Division semantics: int/0 -> inf, signed zero-division, NaN
        propagation, and mixed float/int frames (GH 7785, GH 9144)."""
        with np.errstate(all='ignore'):
            # integer division with zeros in the denominator -> inf
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
            result = p['first'] / p['second']
            expected = Series(
                p['first'].values.astype(float) / p['second'].values,
                dtype='float64')
            expected.iloc[0:3] = np.inf
            assert_series_equal(result, expected)
            result = p['first'] / 0
            expected = Series(np.inf, index=p.index, name='first')
            assert_series_equal(result, expected)
            p = p.astype('float64')
            result = p['first'] / p['second']
            expected = Series(p['first'].values / p['second'].values)
            assert_series_equal(result, expected)
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
            result = p['first'] / p['second']
            assert_series_equal(result, p['first'].astype('float64'),
                                check_names=False)
            self.assertTrue(result.name is None)
            self.assertFalse(np.array_equal(result, p['second'] / p['first']))
            # inf signing
            s = Series([np.nan, 1., -1.])
            result = s / 0
            expected = Series([np.nan, np.inf, -np.inf])
            assert_series_equal(result, expected)
            # float/integer issue
            # GH 7785
            p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
            expected = Series([-0.01, -np.inf])
            result = p['second'].div(p['first'])
            assert_series_equal(result, expected, check_names=False)
            result = p['second'] / p['first']
            assert_series_equal(result, expected)
            # GH 9144
            s = Series([-1, 0, 1])
            result = 0 / s
            expected = Series([0.0, nan, 0.0])
            assert_series_equal(result, expected)
            result = s / 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
            result = s // 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
    def test_operators(self):
        """Cython/numpy arithmetic and comparisons agree with the
        pure-python ``Series.combine`` implementation for a grid of ops."""
        def _check_op(series, other, op, pos_only=False,
                      check_dtype=True):
            # pos_only guards ops (like pow) that need positive operands
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other
            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_series_equal(cython_or_numpy, python,
                                   check_dtype=check_dtype)
        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))
            _check_op(series, other, operator.pow, pos_only=True)
            # reversed-operand variants of each op
            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))
        check(self.ts, self.ts * 2)
        check(self.ts, self.ts * 0)
        check(self.ts, self.ts[::2])
        check(self.ts, 5)
        def check_comparators(series, other, check_dtype=True):
            _check_op(series, other, operator.gt, check_dtype=check_dtype)
            _check_op(series, other, operator.ge, check_dtype=check_dtype)
            _check_op(series, other, operator.eq, check_dtype=check_dtype)
            _check_op(series, other, operator.lt, check_dtype=check_dtype)
            _check_op(series, other, operator.le, check_dtype=check_dtype)
        check_comparators(self.ts, 5)
        check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
    def test_operators_timedelta64(self):
        """datetime64/timedelta64 Series arithmetic: invalid integer ops
        raise, subtraction yields m8[ns], and +/- timedelta round-trips."""
        # invalid ops
        self.assertRaises(Exception, self.objSeries.__add__, 1)
        self.assertRaises(Exception, self.objSeries.__add__,
                          np.array(1, dtype=np.int64))
        self.assertRaises(Exception, self.objSeries.__sub__, 1)
        self.assertRaises(Exception, self.objSeries.__sub__,
                          np.array(1, dtype=np.int64))
        # series ops
        v1 = date_range('2012-1-1', periods=3, freq='D')
        v2 = date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        # each delta is exactly one day (in nanoseconds)
        xp = Series(1e9 * 3600 * 24,
                    rs.index).astype('int64').astype('timedelta64[ns]')
        assert_series_equal(rs, xp)
        self.assertEqual(rs.dtype, 'timedelta64[ns]')
        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')
        # series on the rhs
        result = df['A'] - df['A'].shift()
        self.assertEqual(result.dtype, 'timedelta64[ns]')
        result = df['A'] + td
        self.assertEqual(result.dtype, 'M8[ns]')
        # scalar Timestamp on rhs
        maxa = df['A'].max()
        tm.assertIsInstance(maxa, Timestamp)
        resultb = df['A'] - df['A'].max()
        self.assertEqual(resultb.dtype, 'timedelta64[ns]')
        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        assert_series_equal(result, expected)
        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series(
            [timedelta(days=4017 + i) for i in range(3)], name='A')
        assert_series_equal(result, expected)
        self.assertEqual(result.dtype, 'm8[ns]')
        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        self.assertEqual(resulta.dtype, 'm8[ns]')
        # roundtrip
        resultb = resulta + d
        assert_series_equal(df['A'], resultb)
        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(resultb, df['A'])
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(df['A'], resultb)
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
    def test_timedeltas_with_DateOffset(self):
        """datetime64 Series +/- pd.offsets and np.timedelta64 (GH 4532)."""
        # operate with pd.offsets
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        result = s + pd.offsets.Second(5)
        result2 = pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
            '20130101 9:02:05')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s - pd.offsets.Second(5)
        result2 = -pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
            '20130101 9:01:55')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
            '20130101 9:07:00.005')])
        assert_series_equal(result, expected)
        # operate with np.timedelta64 correctly
        result = s + np.timedelta64(1, 's')
        result2 = np.timedelta64(1, 's') + s
        expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
            '20130101 9:02:01')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + np.timedelta64(5, 'ms')
        result2 = np.timedelta64(5, 'ms') + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # valid DateOffsets
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            # smoke-test both operand orders; no result assertion here
            s + op(5)
            op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
    def test_timedelta64_operations_with_DateOffset(self):
        """timedelta64 Series +/- pd.offsets, including a Series of mixed
        offsets (GH 10699)."""
        # GH 10699
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        assert_series_equal(result, expected)
        result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                              pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3), timedelta(
            minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
        assert_series_equal(result, expected)
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        assert_series_equal(result, expected)
        # valid DateOffsets
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            # smoke-test all four combinations; no result assertion here
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
    def test_timedelta64_operations_with_timedeltas(self):
        """timedelta Series minus timedelta scalar (both datetime.timedelta
        and pd.to_timedelta forms), with round-trips."""
        # td operate with td
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td2 = timedelta(minutes=5, seconds=4)
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
        # Now again, using pd.to_timedelta, which should build
        # a Series or a scalar, depending on input.
        td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
        td2 = pd.to_timedelta('00:05:04')
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
    def test_timedelta64_operations_with_integers(self):
        """Multiply/divide timedelta64 Series by ints of various dtypes
        (GH 4521); add/sub of plain ints raises TypeError."""
        # GH 4521
        # divide/multiply by integers
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        s1[2] = np.nan
        s2 = Series([2, 3, 4])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        result = s1 / 2
        expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 * s2
        assert_series_equal(result, expected)
        # NOTE(review): 'uint32' appears twice in this dtype list
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
                      'uint8']:
            s2 = Series([20, 30, 40], dtype=dtype)
            expected = Series(
                s1.values.astype(np.int64) * s2.astype(np.int64),
                dtype='m8[ns]')
            expected[2] = np.nan
            result = s1 * s2
            assert_series_equal(result, expected)
        result = s1 * 2
        expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        result = s1 * -1
        expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        # invalid ops
        assert_series_equal(s1 / s2.astype(float),
                            Series([Timedelta('2 days 22:48:00'), Timedelta(
                                '1 days 23:12:00'), Timedelta('NaT')]))
        assert_series_equal(s1 / 2.0,
                            Series([Timedelta('29 days 12:00:00'), Timedelta(
                                '29 days 12:00:00'), Timedelta('NaT')]))
        for op in ['__add__', '__sub__']:
            sop = getattr(s1, op, None)
            if sop is not None:
                self.assertRaises(TypeError, sop, 1)
                self.assertRaises(TypeError, sop, s2.values)
    def test_timedelta64_conversions(self):
        """Dividing a timedelta64 Series by np.timedelta64 of each unit, and
        astype() conversions to unit-timedeltas and object dtype."""
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        s1[2] = np.nan
        for m in [1, 3, 10]:
            for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
                # op
                expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
                result = s1 / np.timedelta64(m, unit)
                assert_series_equal(result, expected)
                if m == 1 and unit != 'ns':
                    # astype
                    result = s1.astype("timedelta64[{0}]".format(unit))
                    assert_series_equal(result, expected)
                # reverse op
                expected = s1.apply(
                    lambda x: Timedelta(np.timedelta64(m, unit)) / x)
                result = np.timedelta64(m, unit) / s1
                # NOTE(review): this reverse-op ``result`` is never compared
                # to ``expected`` -- the assertion appears to be missing
        # astype
        s = Series(date_range('20130101', periods=3))
        result = s.astype(object)
        self.assertIsInstance(result.iloc[0], datetime)
        self.assertTrue(result.dtype == np.object_)
        result = s1.astype(object)
        self.assertIsInstance(result.iloc[0], timedelta)
        self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
    def test_operators_datetimelike(self):
        """Which arithmetic ops are defined between timedelta64, datetime64
        (naive and tz-aware) Series and scalars; undefined ops raise."""
        def run_ops(ops, get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            for op_str in ops:
                op = getattr(get_ser, op_str, None)
                with tm.assertRaisesRegexp(TypeError, 'operate'):
                    op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td2 = timedelta(minutes=5, seconds=4)
        ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
               '__rfloordiv__', '__rpow__']
        run_ops(ops, td1, td2)
        # the remaining ops are defined (smoke tests, no assertions)
        td1 + td2
        td2 + td1
        td1 - td2
        td2 - td1
        td1 / td2
        td2 / td1
        # ## datetime64 ###
        dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                      Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                      Timestamp('20120104')])
        ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__radd__', '__rmul__', '__rfloordiv__',
               '__rtruediv__', '__rdiv__', '__rpow__']
        run_ops(ops, dt1, dt2)
        dt1 - dt2
        dt2 - dt1
        # ## datetime64 with timedelta ###
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        run_ops(ops, dt1, td1)
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1
        # ## timedelta with datetime64 ###
        ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
               '__rdiv__', '__rpow__']
        run_ops(ops, td1, dt1)
        td1 + dt1
        dt1 + td1
        # 8260, 10763
        # datetime64 with tz
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        run_ops(ops, dt1, td1)
        # scalar timedelta on the rhs of a tz-aware datetime Series
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        if not _np_version_under1p8:
            result = td1[0] + dt1
            exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
            result = td2[0] + dt2
            exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td1[0] - dt1)
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td2[0] - dt2)
        # Series timedelta on the rhs
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td1 - dt1)
        self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left < right)
tm.assert_series_equal(left.lt(right), left <= right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left < right)
tm.assert_series_equal(left.lt(right, axis=axis), left <= right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
def test_operators_frame(self):
df = DataFrame({'A': self.ts})
tm.assert_series_equal(self.ts + self.ts, self.ts + df['A'],
check_names=False)
tm.assert_series_equal(self.ts ** self.ts, self.ts ** df['A'],
check_names=False)
tm.assert_series_equal(self.ts < self.ts, self.ts < df['A'],
check_names=False)
tm.assert_series_equal(self.ts / self.ts, self.ts / df['A'],
check_names=False)
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all='ignore'):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x),
1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
for op, equiv_op, fv in pairings:
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
op(a, b, axis=0)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
self.assertTrue(tm.equalContents(ts.index != 5, expected))
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
self.assertTrue(isnull(result[0]))
self.assertTrue(isnull(result2[0]))
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
assert_series_equal(result, expected)
result = s + '_suffix'
expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
assert_series_equal(result, expected)
def test_divide_decimal(self):
from decimal import Decimal
expected = Series([Decimal(5)])
s = Series([Decimal(10)])
s = s / Decimal(2)
tm.assert_series_equal(expected, s)
s = Series([Decimal(10)])
s = s // Decimal(2)
tm.assert_series_equal(expected, s)
def test_datetime64_with_index(self):
s = Series(np.random.randn(5))
expected = s - s.index.to_series()
result = s - s.index
assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5),
index=date_range('20130101', periods=5))
expected = s - s.index.to_series()
result = s - s.index
assert_series_equal(result, expected)
result = s - s.index.to_period()
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(5, 2),
index=date_range('20130101', periods=5))
df['date'] = Timestamp('20130102')
df['expected'] = df['date'] - df.index.to_series()
df['result'] = df['date'] - df.index
assert_series_equal(df['result'], df['expected'], check_names=False)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
assert_series_equal(res, Series([np.nan, 3, np.nan], index=base))
| true | true |
f7fc4793014b6ae4f4764af79dff30029ab092e7 | 491 | py | Python | obywatele/migrations/0032_auto_20210911_2051.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 7 | 2016-02-21T17:25:54.000Z | 2021-10-09T19:36:10.000Z | obywatele/migrations/0032_auto_20210911_2051.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 19 | 2020-02-11T23:55:01.000Z | 2022-03-31T18:11:56.000Z | obywatele/migrations/0032_auto_20210911_2051.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 3 | 2016-01-20T22:34:58.000Z | 2020-09-16T07:45:42.000Z | # Generated by Django 3.1.12 on 2021-09-11 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('obywatele', '0031_auto_20210911_2049'),
]
operations = [
migrations.AlterField(
model_name='uzytkownik',
name='phone',
field=models.CharField(blank=True, help_text='Phone number i.e. +48 123 456 789', max_length=20, null=True, verbose_name='Phone number'),
),
]
| 25.842105 | 149 | 0.631365 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('obywatele', '0031_auto_20210911_2049'),
]
operations = [
migrations.AlterField(
model_name='uzytkownik',
name='phone',
field=models.CharField(blank=True, help_text='Phone number i.e. +48 123 456 789', max_length=20, null=True, verbose_name='Phone number'),
),
]
| true | true |
f7fc47ee9828b852894df4f635b387bc277449f2 | 27,218 | py | Python | corehq/apps/couch_sql_migration/statedb.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/couch_sql_migration/statedb.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/couch_sql_migration/statedb.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | import errno
import json
import logging
import os
import os.path
from collections import defaultdict, namedtuple
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from itertools import groupby
import attr
from memoized import memoized
from sqlalchemy import (
Column,
Integer,
String,
Text,
bindparam,
func,
)
from sqlalchemy.exc import IntegrityError
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.tzmigration.planning import Base, DiffDB, PlanningDiff as Diff
from corehq.apps.tzmigration.timezonemigration import MISSING, json_diff
from corehq.util.datadog.gauges import datadog_counter
from corehq.util.log import with_progress_bar
from .diff import filter_form_diffs
log = logging.getLogger(__name__)
def init_state_db(domain, state_dir):
db_filepath = _get_state_db_filepath(domain, state_dir)
db_dir = os.path.dirname(db_filepath)
if os.path.isdir(state_dir) and not os.path.isdir(db_dir):
os.mkdir(db_dir)
return StateDB.init(domain, db_filepath)
def open_state_db(domain, state_dir, *, readonly=True):
"""Open state db in read-only mode"""
db_filepath = _get_state_db_filepath(domain, state_dir)
if not os.path.exists(db_filepath):
raise Error(f"not found: {db_filepath}")
return StateDB.open(domain, db_filepath, readonly=readonly)
def delete_state_db(domain, state_dir):
db_filepath = _get_state_db_filepath(domain, state_dir)
try:
os.remove(db_filepath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def _get_state_db_filepath(domain, state_dir):
return os.path.join(state_dir, "db", '{}-couch-sql.db'.format(domain))
class StateDB(DiffDB):
@classmethod
def init(cls, domain, path):
is_new_db = not os.path.exists(path)
db = super(StateDB, cls).init(domain, path)
if is_new_db:
db._set_kv("db_unique_id", datetime.utcnow().strftime("%Y%m%d-%H%M%S.%f"))
else:
db._migrate()
return db
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.is_rebuild = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
self.engine.dispose()
@contextmanager
def session(self, session=None):
if session is not None:
yield session
return
session = self.Session()
try:
yield session
session.commit()
finally:
session.close()
@property
@memoized
def unique_id(self):
with self.session() as session:
return self._get_kv("db_unique_id", session).value
def get(self, name, default=None):
with self.session() as session:
kv = self._get_kv(f"kv-{name}", session)
if kv is None:
return default
return json.loads(kv.value)
def set(self, name, value):
self._upsert(KeyValue, KeyValue.key, f"kv-{name}", json.dumps(value))
def update_cases(self, case_records):
"""Update case total and processed form counts
:param case_records: iterable of objects, each having the attributes:
- id: case id
- total_forms: number of forms known to update the case.
- processed_forms: number of forms updating the case that
have been processed.
:returns: list of three-tuples `(case_id, total_forms, processed_forms)`
"""
params = [
{"case": rec.id, "total": rec.total_forms, "proc": rec.processed_forms}
for rec in case_records
]
with self.session() as session:
session.execute(
"""
REPLACE INTO {table} (case_id, total_forms, processed_forms)
VALUES (
:case,
MAX(COALESCE((
SELECT total_forms
FROM {table}
WHERE case_id = :case
), 0), :total),
COALESCE((
SELECT processed_forms
FROM {table}
WHERE case_id = :case
), 0) + :proc
)
""".format(table=CaseForms.__tablename__),
params,
)
case_ids = [p["case"] for p in params]
query = session.query(CaseForms).filter(CaseForms.case_id.in_(case_ids))
result = [(c.case_id, c.total_forms, c.processed_forms) for c in query]
assert len(case_ids) == len(result), (case_ids, result)
return result
def add_processed_forms(self, cases):
"""Increment processed forms count for each of the given cases
:param cases: dict `{<case_id>: <processed_form_count>, ...}`
:returns: list of three-tuples `(case_id, total_forms, processed_forms)`
where `total_forms` is `None` for unknown cases.
"""
case_col = CaseForms.case_id
proc_col = CaseForms.processed_forms
params = [{"case": c, "proc": p} for c, p in cases.items()]
with self.session() as session:
session.execute(
CaseForms.__table__.update()
.where(case_col == bindparam("case"))
.values({proc_col: proc_col + bindparam("proc")}),
params,
)
query = session.query(CaseForms).filter(case_col.in_(cases))
case_forms = {cf.case_id: cf for cf in query}
def make_result(case_id):
case = case_forms.get(case_id)
if case is None:
return (case_id, None, None)
return (case_id, case.total_forms, case.processed_forms)
return [make_result(case_id) for case_id in cases]
def iter_cases_with_unprocessed_forms(self):
query = self.Session().query(
CaseForms.case_id,
CaseForms.total_forms,
).filter(CaseForms.total_forms > CaseForms.processed_forms)
for case_id, total_forms in iter_large(query, CaseForms.case_id):
yield case_id, total_forms
def get_forms_count(self, case_id):
with self.session() as session:
query = session.query(CaseForms.total_forms).filter_by(case_id=case_id)
return query.scalar() or 0
def add_cases_to_diff(self, case_ids):
if not case_ids:
return
with self.session() as session:
session.execute(
f"INSERT OR IGNORE INTO {CaseToDiff.__tablename__} (id) VALUES (:id)",
[{"id": x} for x in case_ids],
)
def add_diffed_cases(self, case_ids):
if not case_ids:
return
with self.session() as session:
session.execute(
f"INSERT OR IGNORE INTO {DiffedCase.__tablename__} (id) VALUES (:id)",
[{"id": x} for x in case_ids],
)
(
session.query(CaseToDiff)
.filter(CaseToDiff.id.in_(case_ids))
.delete(synchronize_session=False)
)
def iter_undiffed_case_ids(self):
query = self.Session().query(CaseToDiff.id)
for case_id, in iter_large(query, CaseToDiff.id):
yield case_id
def count_undiffed_cases(self):
with self.session() as session:
return session.query(CaseToDiff).count()
def iter_case_ids_with_diffs(self):
query = (
self.Session().query(DocDiffs.doc_id)
.filter(DocDiffs.kind == "CommCareCase")
)
for doc_id, in iter_large(query, DocDiffs.doc_id):
yield doc_id
def count_case_ids_with_diffs(self):
with self.session() as session:
return (
session.query(DocDiffs.doc_id)
.filter(DocDiffs.kind == "CommCareCase")
.count()
)
def add_problem_form(self, form_id):
"""Add form to be migrated with "unprocessed" forms
A "problem" form is an error form with normal doctype (XFormInstance)
"""
with self.session() as session:
session.add(ProblemForm(id=form_id))
def iter_problem_forms(self):
query = self.Session().query(ProblemForm.id)
for form_id, in iter_large(query, ProblemForm.id):
yield form_id
def add_no_action_case_form(self, form_id):
try:
with self.session() as session:
session.add(NoActionCaseForm(id=form_id))
except IntegrityError:
pass
else:
self.get_no_action_case_forms.reset_cache(self)
@memoized
def get_no_action_case_forms(self):
"""Get the set of form ids that touch cases without actions"""
return {x for x, in self.Session().query(NoActionCaseForm.id)}
def set_resume_state(self, key, value):
resume_key = "resume-{}".format(key)
self._upsert(KeyValue, KeyValue.key, resume_key, json.dumps(value))
@contextmanager
def pop_resume_state(self, key, default):
resume_key = "resume-{}".format(key)
with self.session() as session:
kv = self._get_kv(resume_key, session)
if kv is None:
self._set_kv(resume_key, RESUME_NOT_ALLOWED, session)
yield default
elif self.is_rebuild:
yield default
elif kv.value == RESUME_NOT_ALLOWED:
raise ResumeError("previous session did not save resume state")
else:
yield json.loads(kv.value)
kv.value = RESUME_NOT_ALLOWED
def _get_kv(self, key, session):
return session.query(KeyValue).get(key)
def _set_kv(self, key, value, session=None):
with self.session(session) as session:
session.add(KeyValue(key=key, value=value))
def _upsert(self, model, key_field, key, value, incr=False):
with self.session() as session:
updated = (
session.query(model)
.filter(key_field == key)
.update(
{model.value: (model.value + value) if incr else value},
synchronize_session=False,
)
)
if not updated:
obj = model(value=value)
key_field.__set__(obj, key)
session.add(obj)
else:
assert updated == 1, (key, updated)
def add_missing_docs(self, kind, doc_ids):
with self.session() as session:
session.bulk_save_objects([
MissingDoc(kind=kind, doc_id=doc_id)
for doc_id in doc_ids
])
def delete_missing_docs(self, kind):
with self.session() as session:
(
session.query(MissingDoc)
.filter_by(kind=kind)
.delete(synchronize_session=False)
)
def doc_not_missing(self, kind, doc_id):
with self.session() as session:
(
session.query(MissingDoc.doc_id)
.filter_by(kind=kind, doc_id=doc_id)
.delete(synchronize_session=False)
)
def save_form_diffs(self, couch_json, sql_json):
diffs = json_diff(couch_json, sql_json, track_list_indices=False)
diffs = filter_form_diffs(couch_json, sql_json, diffs)
dd_count = partial(datadog_counter, tags=["domain:" + self.domain])
dd_count("commcare.couchsqlmigration.form.diffed")
doc_type = couch_json["doc_type"]
doc_id = couch_json["_id"]
self.add_diffs(doc_type, doc_id, diffs)
if diffs:
dd_count("commcare.couchsqlmigration.form.has_diff")
def replace_case_diffs(self, case_diffs, **kw):
diffs_by_doc = defaultdict(list)
for kind, doc_id, diffs in case_diffs:
assert all(isinstance(d.path, (list, tuple)) for d in diffs), diffs
if kind == "stock state":
case_id = doc_id.split("/", 1)[0]
diffs = [
d._replace(path={"stock_id": doc_id, "path": d.path})
for d in diffs
]
diffs_by_doc[("CommCareCase", case_id)].extend(diffs)
else:
diffs_by_doc[(kind, doc_id)].extend(diffs)
for (doc_type, case_id), diffs in diffs_by_doc.items():
self.add_diffs(doc_type, case_id, diffs, **kw)
def add_diffs(self, kind, doc_id, diffs, *, session=None, _model=None):
if _model is None:
_model = DocDiffs
to_dict = _model.diff_to_dict
assert kind != "stock state", ("stock state diffs should be "
"combined with other diffs for the same case")
if diffs:
diff_json = json.dumps([to_dict(d) for d in diffs], cls=LazyEncoder)
with self.session(session) as session:
session.execute(
f"""
REPLACE INTO {_model.__tablename__} (kind, doc_id, diffs)
VALUES (:kind, :doc_id, :diffs)
""",
[{"kind": kind, "doc_id": doc_id, "diffs": diff_json}],
)
else:
with self.session(session) as session:
session.query(_model).filter(
_model.kind == kind,
_model.doc_id == doc_id,
).delete(synchronize_session=False)
def replace_case_changes(self, changes):
self.replace_case_diffs(changes, _model=DocChanges)
def iter_diffs(self, *, _model=None):
if _model is None:
_model = DocDiffs
with self.session() as session:
for kind, in list(session.query(_model.kind).distinct()):
query = session.query(_model).filter_by(kind=kind)
for doc in iter_large(query, _model.doc_id):
for data in json.loads(doc.diffs):
yield _model.dict_to_diff(doc.kind, doc.doc_id, data)
def iter_changes(self):
return self.iter_diffs(_model=DocChanges)
def iter_doc_diffs(self, kind=None, _model=None):
"""Iterate over diffs of the given kind
"stock state" diffs cannot be queried directly with this method.
They are grouped with diffs of the corresponding case
(kind="CommCareCase", doc_id=<case_id>).
:yeilds: two-tuples `(doc_id, diffs)`. The diffs yielded here are
`PlanningDiff` objects, which should not be confused with json
diffs (`<PlanningDiff>.json_diff`).
"""
if _model is None:
_model = DocDiffs
with self.session() as session:
query = session.query(_model)
if kind is not None:
query = query.filter_by(kind=kind)
for doc in iter_large(query, _model.doc_id):
yield doc.kind, doc.doc_id, [
_model.dict_to_diff(doc.kind, doc.doc_id, data)
for data in json.loads(doc.diffs)
]
def iter_doc_changes(self, kind=None):
return self.iter_doc_diffs(kind, _model=DocChanges)
def get_diffs(self):
"""DEPRECATED use iter_diffs(); the result may be very large"""
return list(self.iter_diffs())
def set_counter(self, kind, value):
self._upsert(DocCount, DocCount.kind, kind, value)
def get_doc_counts(self):
"""Returns a dict of counts by kind
Values are `Counts` objects having `total` and `missing`
fields:
- total: number of items counted with `increment_counter`.
- missing: count of ids found in Couch but not in SQL.
- diffs: count of docs with diffs.
"""
with self.session() as session:
totals = {dc.kind: dc.value for dc in session.query(DocCount)}
diffs = dict(session.query(
DocDiffs.kind,
func.count(DocDiffs.doc_id),
).group_by(DocDiffs.kind))
missing = dict(session.query(
MissingDoc.kind,
func.count(MissingDoc.doc_id),
).group_by(MissingDoc.kind))
changes = dict(session.query(
DocChanges.kind,
func.count(DocChanges.doc_id),
).group_by(DocChanges.kind))
return {kind: Counts(
total=totals.get(kind, 0),
diffs=diffs.get(kind, 0),
missing=missing.get(kind, 0),
changes=changes.get(kind, 0),
) for kind in set(totals) | set(missing) | set(diffs)}
def iter_missing_doc_ids(self, kind):
with self.session() as session:
query = (
session.query(MissingDoc.doc_id)
.filter(MissingDoc.kind == kind)
)
yield from (x for x, in iter_large(query, MissingDoc.doc_id))
def get_diff_stats(self):
raise NotImplementedError("use get_doc_counts")
def clone_casediff_data_from(self, casediff_state_path):
"""Copy casediff state into this state db
model analysis
- CaseForms - casediff r/w
- Diff - deprecated
- KeyValue - casediff r/w, main r/w (different keys)
- DocCount - casediff w, main r
- DocDiffs - casediff w (case and stock kinds), main r/w
- DocChanges - casediff w (case and stock kinds), main r/w
- MissingDoc - casediff w, main r
- NoActionCaseForm - main r/w
- ProblemForm - main r/w
"""
def quote(value):
assert isinstance(value, str) and "'" not in value, repr(value)
return f"'{value}'"
def quotelist(values):
return f"({', '.join(quote(v) for v in values)})"
def is_id(column):
return column.key == "id" and isinstance(column.type, Integer)
def copy(model, session, where_expr=None):
log.info("copying casediff data: %s", model.__name__)
where = f"WHERE {where_expr}" if where_expr else ""
fields = ", ".join(c.key for c in model.__table__.columns if not is_id(c))
session.execute(f"DELETE FROM main.{model.__tablename__} {where}")
session.execute(f"""
INSERT INTO main.{model.__tablename__} ({fields})
SELECT {fields} FROM cddb.{model.__tablename__} {where}
""")
log.info("checking casediff data preconditions...")
casediff_db = type(self).open(self.domain, casediff_state_path)
with casediff_db.session() as cddb:
expect_casediff_kinds = {
"CommCareCase",
"CommCareCase-Deleted",
"stock state",
}
casediff_kinds = {k for k, in cddb.query(DocDiffs.kind).distinct()}
casediff_kinds.update(k for k, in cddb.query(DocChanges.kind).distinct())
assert not casediff_kinds - expect_casediff_kinds, casediff_kinds
resume_keys = [
key for key, in cddb.query(KeyValue.key)
.filter(KeyValue.key.startswith("resume-"))
]
assert all("Case" in key for key in resume_keys), resume_keys
count_kinds = [k for k, in cddb.query(DocCount.kind).distinct()]
assert all("CommCareCase" in k for k in count_kinds), count_kinds
missing_kinds = [m for m, in cddb.query(MissingDoc.kind).distinct()]
assert all("CommCareCase" in k for k in missing_kinds), missing_kinds
casediff_db.close()
with self.session() as session:
session.execute(f"ATTACH DATABASE {quote(casediff_state_path)} AS cddb")
copy(CaseForms, session)
copy(Diff, session, f"kind IN {quotelist(expect_casediff_kinds)}")
copy(DocDiffs, session, f"kind IN {quotelist(expect_casediff_kinds)}")
copy(DocChanges, session, f"kind IN {quotelist(expect_casediff_kinds)}")
copy(KeyValue, session, f"key IN {quotelist(resume_keys)}")
copy(DocCount, session)
copy(MissingDoc, session)
def _migrate(self):
with self.session() as session:
self._migrate_diff_to_docdiffs(session)
def _migrate_diff_to_docdiffs(self, session):
if session.query(session.query(DocDiffs).exists()).scalar():
return # already migrated
if not session.query(session.query(Diff).exists()).scalar():
return # nothing to migrate
log.info("migrating PlanningDiff to DocDiffs...")
base_query = session.query(Diff).filter(Diff.kind != "stock state")
count = base_query.count()
query = base_query.order_by(Diff.kind, Diff.doc_id)
items = with_progress_bar(query, count, oneline="concise", prefix="main diffs")
for (kind, doc_id), diffs in groupby(items, lambda d: (d.kind, d.doc_id)):
diffs = [d.json_diff for d in diffs]
self.add_diffs(kind, doc_id, diffs, session=session)
# "stock state" diffs must be migrated after "CommCareCase"
# diffs since it will probably replace some of them
self._migrate_stock_state_diffs(session)
def _migrate_stock_state_diffs(self, session):
def get_case_diffs(case_id):
case_diffs = session.query(Diff).filter_by(doc_id=case_id)
return [d.json_diff for d in case_diffs]
query = session.query(Diff).filter_by(kind="stock state")
count = query.count()
stock_state_diffs = with_progress_bar(
query, count, oneline="concise", prefix="stock state cases")
diffs_by_doc = defaultdict(list)
for stock_diff in stock_state_diffs:
case_id, x, x = stock_diff.doc_id.split("/")
key = ("CommCareCase", case_id)
jsdiff = stock_diff.json_diff
stock_json_diff = jsdiff._replace(path={
"stock_id": stock_diff.doc_id,
"path": jsdiff.path,
})
if key not in diffs_by_doc:
diffs_by_doc[key].extend(get_case_diffs(case_id))
diffs_by_doc[key].append(stock_json_diff)
for (doc_type, case_id), diffs in diffs_by_doc.items():
self.add_diffs(doc_type, case_id, diffs, session=session)
def vacuum(self):
with self.session() as session:
session.execute("VACUUM")
class Error(Exception):
pass
class ResumeError(Exception):
pass
RESUME_NOT_ALLOWED = "RESUME_NOT_ALLOWED"
class CaseForms(Base):
__tablename__ = "caseforms"
case_id = Column(String(50), nullable=False, primary_key=True)
total_forms = Column(Integer, nullable=False)
processed_forms = Column(Integer, nullable=False, default=0)
class CaseToDiff(Base):
__tablename__ = 'case_to_diff'
id = Column(String(50), nullable=False, primary_key=True)
class DiffedCase(Base):
__tablename__ = 'diffed_case'
id = Column(String(50), nullable=False, primary_key=True)
class DocCount(Base):
__tablename__ = 'doc_count'
kind = Column(String(50), primary_key=True)
value = Column(Integer, nullable=False)
class DocDiffs(Base):
__tablename__ = 'doc_diffs'
kind = Column(String(50), nullable=False, primary_key=True)
doc_id = Column(String(50), nullable=False, primary_key=True)
diffs = Column(Text(), nullable=False)
def diff_to_dict(diff):
data = {"type": diff.diff_type, "path": diff.path}
if diff.old_value is not MISSING:
data["old_value"] = diff.old_value
if diff.new_value is not MISSING:
data["new_value"] = diff.new_value
return data
def dict_to_diff(kind, doc_id, data, *, _make_diff=Diff):
def json_or_none(data, key):
return json.dumps(data[key]) if key in data else None
path = data["path"]
if len(path) == 2 and isinstance(path, dict):
assert path.keys() == {"stock_id", "path"}, path
assert path["stock_id"].startswith(doc_id + "/"), (doc_id, path)
kind = "stock state"
doc_id = path["stock_id"]
path = path["path"]
return _make_diff(
kind=kind,
doc_id=doc_id,
diff_type=data["type"],
path=json.dumps(path),
old_value=json_or_none(data, "old_value"),
new_value=json_or_none(data, "new_value"),
)
class DocChanges(Base):
__tablename__ = 'doc_changes'
kind = Column(String(50), nullable=False, primary_key=True)
doc_id = Column(String(50), nullable=False, primary_key=True)
diffs = Column(Text(), nullable=False)
def diff_to_dict(diff):
data = DocDiffs.diff_to_dict(diff)
data["reason"] = diff.reason
return data
def dict_to_diff(kind, doc_id, data):
def change(**kw):
for key in ["path", "old_value", "new_value"]:
kw[key] = MISSING if kw[key] is None else json.loads(kw[key])
return Change(reason=data["reason"], **kw)
return DocDiffs.dict_to_diff(kind, doc_id, data, _make_diff=change)
@attr.s
class Change:
kind = attr.ib()
doc_id = attr.ib()
reason = attr.ib()
diff_type = attr.ib()
path = attr.ib()
old_value = attr.ib()
new_value = attr.ib()
@property
def json_diff(self):
return self
def _replace(self, **data):
cls = type(self)
for att in attr.fields(cls):
if att.name not in data:
data[att.name] = getattr(self, att.name)
return cls(**data)
class KeyValue(Base):
__tablename__ = "keyvalue"
key = Column(String(50), nullable=False, primary_key=True)
value = Column(Text(), nullable=False)
class MissingDoc(Base):
__tablename__ = 'missing_doc'
kind = Column(String(50), nullable=False, primary_key=True)
doc_id = Column(String(50), nullable=False, primary_key=True)
class NoActionCaseForm(Base):
__tablename__ = "noactioncaseform"
id = Column(String(50), nullable=False, primary_key=True)
class ProblemForm(Base):
__tablename__ = "problemform"
id = Column(String(50), nullable=False, primary_key=True)
@attr.s
class Counts:
total = attr.ib(default=0)
diffs = attr.ib(default=0)
missing = attr.ib(default=0)
changes = attr.ib(default=0)
def iter_large(query, pk_attr, maxrq=1000):
"""Specialized windowed query generator using WHERE/LIMIT
Iterate over a dataset that is too large to fetch at once. Results
are ordered by `pk_attr`.
Adapted from https://github.com/sqlalchemy/sqlalchemy/wiki/WindowedRangeQuery
"""
first_id = None
while True:
qry = query
if first_id is not None:
qry = query.filter(pk_attr > first_id)
rec = None
for rec in qry.order_by(pk_attr).limit(maxrq):
yield rec
if rec is None:
break
first_id = getattr(rec, pk_attr.name)
| 35.394018 | 87 | 0.594937 | import errno
import json
import logging
import os
import os.path
from collections import defaultdict, namedtuple
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from itertools import groupby
import attr
from memoized import memoized
from sqlalchemy import (
Column,
Integer,
String,
Text,
bindparam,
func,
)
from sqlalchemy.exc import IntegrityError
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.tzmigration.planning import Base, DiffDB, PlanningDiff as Diff
from corehq.apps.tzmigration.timezonemigration import MISSING, json_diff
from corehq.util.datadog.gauges import datadog_counter
from corehq.util.log import with_progress_bar
from .diff import filter_form_diffs
log = logging.getLogger(__name__)
def init_state_db(domain, state_dir):
db_filepath = _get_state_db_filepath(domain, state_dir)
db_dir = os.path.dirname(db_filepath)
if os.path.isdir(state_dir) and not os.path.isdir(db_dir):
os.mkdir(db_dir)
return StateDB.init(domain, db_filepath)
def open_state_db(domain, state_dir, *, readonly=True):
db_filepath = _get_state_db_filepath(domain, state_dir)
if not os.path.exists(db_filepath):
raise Error(f"not found: {db_filepath}")
return StateDB.open(domain, db_filepath, readonly=readonly)
def delete_state_db(domain, state_dir):
db_filepath = _get_state_db_filepath(domain, state_dir)
try:
os.remove(db_filepath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def _get_state_db_filepath(domain, state_dir):
return os.path.join(state_dir, "db", '{}-couch-sql.db'.format(domain))
class StateDB(DiffDB):
@classmethod
def init(cls, domain, path):
is_new_db = not os.path.exists(path)
db = super(StateDB, cls).init(domain, path)
if is_new_db:
db._set_kv("db_unique_id", datetime.utcnow().strftime("%Y%m%d-%H%M%S.%f"))
else:
db._migrate()
return db
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.is_rebuild = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
self.engine.dispose()
@contextmanager
def session(self, session=None):
if session is not None:
yield session
return
session = self.Session()
try:
yield session
session.commit()
finally:
session.close()
@property
@memoized
def unique_id(self):
with self.session() as session:
return self._get_kv("db_unique_id", session).value
def get(self, name, default=None):
with self.session() as session:
kv = self._get_kv(f"kv-{name}", session)
if kv is None:
return default
return json.loads(kv.value)
def set(self, name, value):
self._upsert(KeyValue, KeyValue.key, f"kv-{name}", json.dumps(value))
def update_cases(self, case_records):
params = [
{"case": rec.id, "total": rec.total_forms, "proc": rec.processed_forms}
for rec in case_records
]
with self.session() as session:
session.execute(
"""
REPLACE INTO {table} (case_id, total_forms, processed_forms)
VALUES (
:case,
MAX(COALESCE((
SELECT total_forms
FROM {table}
WHERE case_id = :case
), 0), :total),
COALESCE((
SELECT processed_forms
FROM {table}
WHERE case_id = :case
), 0) + :proc
)
""".format(table=CaseForms.__tablename__),
params,
)
case_ids = [p["case"] for p in params]
query = session.query(CaseForms).filter(CaseForms.case_id.in_(case_ids))
result = [(c.case_id, c.total_forms, c.processed_forms) for c in query]
assert len(case_ids) == len(result), (case_ids, result)
return result
def add_processed_forms(self, cases):
case_col = CaseForms.case_id
proc_col = CaseForms.processed_forms
params = [{"case": c, "proc": p} for c, p in cases.items()]
with self.session() as session:
session.execute(
CaseForms.__table__.update()
.where(case_col == bindparam("case"))
.values({proc_col: proc_col + bindparam("proc")}),
params,
)
query = session.query(CaseForms).filter(case_col.in_(cases))
case_forms = {cf.case_id: cf for cf in query}
def make_result(case_id):
case = case_forms.get(case_id)
if case is None:
return (case_id, None, None)
return (case_id, case.total_forms, case.processed_forms)
return [make_result(case_id) for case_id in cases]
def iter_cases_with_unprocessed_forms(self):
query = self.Session().query(
CaseForms.case_id,
CaseForms.total_forms,
).filter(CaseForms.total_forms > CaseForms.processed_forms)
for case_id, total_forms in iter_large(query, CaseForms.case_id):
yield case_id, total_forms
def get_forms_count(self, case_id):
with self.session() as session:
query = session.query(CaseForms.total_forms).filter_by(case_id=case_id)
return query.scalar() or 0
def add_cases_to_diff(self, case_ids):
if not case_ids:
return
with self.session() as session:
session.execute(
f"INSERT OR IGNORE INTO {CaseToDiff.__tablename__} (id) VALUES (:id)",
[{"id": x} for x in case_ids],
)
def add_diffed_cases(self, case_ids):
if not case_ids:
return
with self.session() as session:
session.execute(
f"INSERT OR IGNORE INTO {DiffedCase.__tablename__} (id) VALUES (:id)",
[{"id": x} for x in case_ids],
)
(
session.query(CaseToDiff)
.filter(CaseToDiff.id.in_(case_ids))
.delete(synchronize_session=False)
)
def iter_undiffed_case_ids(self):
query = self.Session().query(CaseToDiff.id)
for case_id, in iter_large(query, CaseToDiff.id):
yield case_id
def count_undiffed_cases(self):
with self.session() as session:
return session.query(CaseToDiff).count()
def iter_case_ids_with_diffs(self):
query = (
self.Session().query(DocDiffs.doc_id)
.filter(DocDiffs.kind == "CommCareCase")
)
for doc_id, in iter_large(query, DocDiffs.doc_id):
yield doc_id
def count_case_ids_with_diffs(self):
with self.session() as session:
return (
session.query(DocDiffs.doc_id)
.filter(DocDiffs.kind == "CommCareCase")
.count()
)
def add_problem_form(self, form_id):
with self.session() as session:
session.add(ProblemForm(id=form_id))
def iter_problem_forms(self):
query = self.Session().query(ProblemForm.id)
for form_id, in iter_large(query, ProblemForm.id):
yield form_id
def add_no_action_case_form(self, form_id):
try:
with self.session() as session:
session.add(NoActionCaseForm(id=form_id))
except IntegrityError:
pass
else:
self.get_no_action_case_forms.reset_cache(self)
@memoized
def get_no_action_case_forms(self):
return {x for x, in self.Session().query(NoActionCaseForm.id)}
def set_resume_state(self, key, value):
resume_key = "resume-{}".format(key)
self._upsert(KeyValue, KeyValue.key, resume_key, json.dumps(value))
@contextmanager
def pop_resume_state(self, key, default):
resume_key = "resume-{}".format(key)
with self.session() as session:
kv = self._get_kv(resume_key, session)
if kv is None:
self._set_kv(resume_key, RESUME_NOT_ALLOWED, session)
yield default
elif self.is_rebuild:
yield default
elif kv.value == RESUME_NOT_ALLOWED:
raise ResumeError("previous session did not save resume state")
else:
yield json.loads(kv.value)
kv.value = RESUME_NOT_ALLOWED
def _get_kv(self, key, session):
return session.query(KeyValue).get(key)
def _set_kv(self, key, value, session=None):
with self.session(session) as session:
session.add(KeyValue(key=key, value=value))
def _upsert(self, model, key_field, key, value, incr=False):
with self.session() as session:
updated = (
session.query(model)
.filter(key_field == key)
.update(
{model.value: (model.value + value) if incr else value},
synchronize_session=False,
)
)
if not updated:
obj = model(value=value)
key_field.__set__(obj, key)
session.add(obj)
else:
assert updated == 1, (key, updated)
def add_missing_docs(self, kind, doc_ids):
with self.session() as session:
session.bulk_save_objects([
MissingDoc(kind=kind, doc_id=doc_id)
for doc_id in doc_ids
])
def delete_missing_docs(self, kind):
with self.session() as session:
(
session.query(MissingDoc)
.filter_by(kind=kind)
.delete(synchronize_session=False)
)
def doc_not_missing(self, kind, doc_id):
with self.session() as session:
(
session.query(MissingDoc.doc_id)
.filter_by(kind=kind, doc_id=doc_id)
.delete(synchronize_session=False)
)
def save_form_diffs(self, couch_json, sql_json):
diffs = json_diff(couch_json, sql_json, track_list_indices=False)
diffs = filter_form_diffs(couch_json, sql_json, diffs)
dd_count = partial(datadog_counter, tags=["domain:" + self.domain])
dd_count("commcare.couchsqlmigration.form.diffed")
doc_type = couch_json["doc_type"]
doc_id = couch_json["_id"]
self.add_diffs(doc_type, doc_id, diffs)
if diffs:
dd_count("commcare.couchsqlmigration.form.has_diff")
def replace_case_diffs(self, case_diffs, **kw):
    """Group incoming diffs by owning document and store them with add_diffs.

    "stock state" diffs are re-pathed (the original path is wrapped with
    the stock doc id) and attached to the owning CommCareCase row, whose
    id is the part of the stock doc id before the first "/".
    """
    diffs_by_doc = defaultdict(list)
    for kind, doc_id, diffs in case_diffs:
        assert all(isinstance(d.path, (list, tuple)) for d in diffs), diffs
        if kind == "stock state":
            case_id = doc_id.split("/", 1)[0]
            diffs = [
                d._replace(path={"stock_id": doc_id, "path": d.path})
                for d in diffs
            ]
            diffs_by_doc[("CommCareCase", case_id)].extend(diffs)
        else:
            diffs_by_doc[(kind, doc_id)].extend(diffs)
    for (doc_type, case_id), diffs in diffs_by_doc.items():
        self.add_diffs(doc_type, case_id, diffs, **kw)
def add_diffs(self, kind, doc_id, diffs, *, session=None, _model=None):
    """Store *diffs* for (kind, doc_id), replacing any previous row.

    An empty *diffs* list deletes the stored row instead. ``_model``
    selects the target table (DocDiffs by default; DocChanges via
    replace_case_changes).
    """
    if _model is None:
        _model = DocDiffs
    to_dict = _model.diff_to_dict
    # "stock state" diffs are folded into their owning case's row (see
    # replace_case_diffs); storing them directly would be a bug
    assert kind != "stock state", ("stock state diffs should be "
        "combined with other diffs for the same case")
    if diffs:
        diff_json = json.dumps([to_dict(d) for d in diffs], cls=LazyEncoder)
        with self.session(session) as session:
            # REPLACE upserts on the (kind, doc_id) composite primary key
            session.execute(
                f"""
                REPLACE INTO {_model.__tablename__} (kind, doc_id, diffs)
                VALUES (:kind, :doc_id, :diffs)
                """,
                [{"kind": kind, "doc_id": doc_id, "diffs": diff_json}],
            )
    else:
        with self.session(session) as session:
            session.query(_model).filter(
                _model.kind == kind,
                _model.doc_id == doc_id,
            ).delete(synchronize_session=False)
def replace_case_changes(self, changes):
    """Like replace_case_diffs, but persist into the DocChanges table."""
    self.replace_case_diffs(changes, _model=DocChanges)
def iter_diffs(self, *, _model=None):
    """Yield every stored diff individually, iterating kind by kind."""
    if _model is None:
        _model = DocDiffs
    with self.session() as session:
        for kind, in list(session.query(_model.kind).distinct()):
            query = session.query(_model).filter_by(kind=kind)
            # keyset pagination keeps memory bounded on large tables
            for doc in iter_large(query, _model.doc_id):
                for data in json.loads(doc.diffs):
                    yield _model.dict_to_diff(doc.kind, doc.doc_id, data)
def iter_changes(self):
    """Yield every stored change (DocChanges counterpart of iter_diffs)."""
    return self.iter_diffs(_model=DocChanges)
def iter_doc_diffs(self, kind=None, _model=None):
    """Yield (kind, doc_id, [diffs]) per document, optionally filtered by *kind*."""
    if _model is None:
        _model = DocDiffs
    with self.session() as session:
        query = session.query(_model)
        if kind is not None:
            query = query.filter_by(kind=kind)
        for doc in iter_large(query, _model.doc_id):
            yield doc.kind, doc.doc_id, [
                _model.dict_to_diff(doc.kind, doc.doc_id, data)
                for data in json.loads(doc.diffs)
            ]
def iter_doc_changes(self, kind=None):
    """Per-document iteration over DocChanges (see iter_doc_diffs)."""
    return self.iter_doc_diffs(kind, _model=DocChanges)
def get_diffs(self):
    """Materialize all stored diffs into a list (convenience wrapper)."""
    return list(self.iter_diffs())
def set_counter(self, kind, value):
    """Set (upsert) the document counter for *kind*."""
    self._upsert(DocCount, DocCount.kind, kind, value)
def get_doc_counts(self):
    """Return a per-kind summary of migration progress.

    :returns: dict mapping document kind to a ``Counts`` value holding the
        total document count plus the number of documents with diffs,
        missing documents, and documents with recorded changes.
    """
    with self.session() as session:
        totals = {dc.kind: dc.value for dc in session.query(DocCount)}
        diffs = dict(session.query(
            DocDiffs.kind,
            func.count(DocDiffs.doc_id),
        ).group_by(DocDiffs.kind))
        missing = dict(session.query(
            MissingDoc.kind,
            func.count(MissingDoc.doc_id),
        ).group_by(MissingDoc.kind))
        changes = dict(session.query(
            DocChanges.kind,
            func.count(DocChanges.doc_id),
        ).group_by(DocChanges.kind))
        # bug fix: include kinds present only in DocChanges; previously the
        # union omitted set(changes), silently dropping such kinds
        kinds = set(totals) | set(missing) | set(diffs) | set(changes)
        return {kind: Counts(
            total=totals.get(kind, 0),
            diffs=diffs.get(kind, 0),
            missing=missing.get(kind, 0),
            changes=changes.get(kind, 0),
        ) for kind in kinds}
def iter_missing_doc_ids(self, kind):
    """Yield the doc ids recorded as missing for *kind*."""
    with self.session() as session:
        query = (
            session.query(MissingDoc.doc_id)
            .filter(MissingDoc.kind == kind)
        )
        # rows are one-tuples; unpack with the trailing-comma pattern
        yield from (x for x, in iter_large(query, MissingDoc.doc_id))
def get_diff_stats(self):
    """Deprecated; superseded by :meth:`get_doc_counts`."""
    raise NotImplementedError("use get_doc_counts")
def clone_casediff_data_from(self, casediff_state_path):
    """Copy case-diff state from another state db file into this one.

    First verifies that the source db only contains case-scoped data
    (diff kinds, resume keys, counters and missing docs), then ATTACHes
    the SQLite file and bulk-copies the relevant tables with raw SQL.
    """
    def quote(value):
        # minimal SQL string quoting; values needing escaping are rejected
        assert isinstance(value, str) and "'" not in value, repr(value)
        return f"'{value}'"
    def quotelist(values):
        return f"({', '.join(quote(v) for v in values)})"
    def is_id(column):
        # surrogate autoincrement ids are regenerated, not copied
        return column.key == "id" and isinstance(column.type, Integer)
    def copy(model, session, where_expr=None):
        # delete-then-insert so repeated clones do not duplicate rows
        log.info("copying casediff data: %s", model.__name__)
        where = f"WHERE {where_expr}" if where_expr else ""
        fields = ", ".join(c.key for c in model.__table__.columns if not is_id(c))
        session.execute(f"DELETE FROM main.{model.__tablename__} {where}")
        session.execute(f"""
            INSERT INTO main.{model.__tablename__} ({fields})
            SELECT {fields} FROM cddb.{model.__tablename__} {where}
        """)
    log.info("checking casediff data preconditions...")
    casediff_db = type(self).open(self.domain, casediff_state_path)
    with casediff_db.session() as cddb:
        expect_casediff_kinds = {
            "CommCareCase",
            "CommCareCase-Deleted",
            "stock state",
        }
        casediff_kinds = {k for k, in cddb.query(DocDiffs.kind).distinct()}
        casediff_kinds.update(k for k, in cddb.query(DocChanges.kind).distinct())
        assert not casediff_kinds - expect_casediff_kinds, casediff_kinds
        resume_keys = [
            key for key, in cddb.query(KeyValue.key)
            .filter(KeyValue.key.startswith("resume-"))
        ]
        assert all("Case" in key for key in resume_keys), resume_keys
        count_kinds = [k for k, in cddb.query(DocCount.kind).distinct()]
        assert all("CommCareCase" in k for k in count_kinds), count_kinds
        missing_kinds = [m for m, in cddb.query(MissingDoc.kind).distinct()]
        assert all("CommCareCase" in k for k in missing_kinds), missing_kinds
    casediff_db.close()
    with self.session() as session:
        session.execute(f"ATTACH DATABASE {quote(casediff_state_path)} AS cddb")
        copy(CaseForms, session)
        copy(Diff, session, f"kind IN {quotelist(expect_casediff_kinds)}")
        copy(DocDiffs, session, f"kind IN {quotelist(expect_casediff_kinds)}")
        copy(DocChanges, session, f"kind IN {quotelist(expect_casediff_kinds)}")
        copy(KeyValue, session, f"key IN {quotelist(resume_keys)}")
        copy(DocCount, session)
        copy(MissingDoc, session)
def _migrate(self):
    """Run in-place schema/data migrations on open (idempotent)."""
    with self.session() as session:
        self._migrate_diff_to_docdiffs(session)
def _migrate_diff_to_docdiffs(self, session):
    """One-off data migration: legacy per-diff rows -> aggregated DocDiffs.

    No-op when DocDiffs already has rows or there is nothing to migrate.
    """
    if session.query(session.query(DocDiffs).exists()).scalar():
        return  # already migrated
    if not session.query(session.query(Diff).exists()).scalar():
        return  # nothing to migrate
    log.info("migrating PlanningDiff to DocDiffs...")
    base_query = session.query(Diff).filter(Diff.kind != "stock state")
    count = base_query.count()
    # order so groupby() sees each (kind, doc_id) group contiguously
    query = base_query.order_by(Diff.kind, Diff.doc_id)
    items = with_progress_bar(query, count, oneline="concise", prefix="main diffs")
    for (kind, doc_id), diffs in groupby(items, lambda d: (d.kind, d.doc_id)):
        diffs = [d.json_diff for d in diffs]
        self.add_diffs(kind, doc_id, diffs, session=session)
    # "stock state" diffs must be migrated after "CommCareCase"
    # diffs since it will probably replace some of them
    self._migrate_stock_state_diffs(session)
def _migrate_stock_state_diffs(self, session):
    """Fold legacy "stock state" diff rows into their case's DocDiffs row."""
    def get_case_diffs(case_id):
        # existing case diffs must be re-saved together with the stock
        # diffs because add_diffs replaces the whole (kind, doc_id) row
        case_diffs = session.query(Diff).filter_by(doc_id=case_id)
        return [d.json_diff for d in case_diffs]
    query = session.query(Diff).filter_by(kind="stock state")
    count = query.count()
    stock_state_diffs = with_progress_bar(
        query, count, oneline="concise", prefix="stock state cases")
    diffs_by_doc = defaultdict(list)
    for stock_diff in stock_state_diffs:
        # doc_id has the form "<case_id>/x/y"; only the case id is needed
        case_id, x, x = stock_diff.doc_id.split("/")
        key = ("CommCareCase", case_id)
        jsdiff = stock_diff.json_diff
        # wrap the path so dict_to_diff can restore the stock doc later
        stock_json_diff = jsdiff._replace(path={
            "stock_id": stock_diff.doc_id,
            "path": jsdiff.path,
        })
        if key not in diffs_by_doc:
            diffs_by_doc[key].extend(get_case_diffs(case_id))
        diffs_by_doc[key].append(stock_json_diff)
    for (doc_type, case_id), diffs in diffs_by_doc.items():
        self.add_diffs(doc_type, case_id, diffs, session=session)
def vacuum(self):
with self.session() as session:
session.execute("VACUUM")
class Error(Exception):
    """Base error for state db operations."""
    pass
class ResumeError(Exception):
    """Raised when resume state is requested but was never saved."""
    pass
# sentinel stored in KeyValue marking resume state as consumed/unavailable
RESUME_NOT_ALLOWED = "RESUME_NOT_ALLOWED"
class CaseForms(Base):
    """Per-case progress: how many of its forms have been processed."""
    __tablename__ = "caseforms"
    case_id = Column(String(50), nullable=False, primary_key=True)
    total_forms = Column(Integer, nullable=False)
    processed_forms = Column(Integer, nullable=False, default=0)
class CaseToDiff(Base):
    """Case id marker table; populated/consumed outside this chunk."""
    __tablename__ = 'case_to_diff'
    id = Column(String(50), nullable=False, primary_key=True)
class DiffedCase(Base):
    """Case id marker table (presumably cases already diffed; used elsewhere)."""
    __tablename__ = 'diffed_case'
    id = Column(String(50), nullable=False, primary_key=True)
class DocCount(Base):
    """One counter per document kind (see set_counter/get_doc_counts)."""
    __tablename__ = 'doc_count'
    kind = Column(String(50), primary_key=True)
    value = Column(Integer, nullable=False)
class DocDiffs(Base):
    """Latest diffs for a document, one row per (kind, doc_id).

    ``diffs`` holds a JSON array of dicts produced by ``diff_to_dict``.
    ``diff_to_dict`` / ``dict_to_diff`` are deliberately plain functions
    accessed through the class (e.g. ``_model.diff_to_dict``), which in
    Python 3 behaves like a static method.
    """
    __tablename__ = 'doc_diffs'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    # JSON-encoded list of diff dicts
    diffs = Column(Text(), nullable=False)
    def diff_to_dict(diff):
        """Convert a diff object to a JSON-serializable dict (MISSING omitted)."""
        data = {"type": diff.diff_type, "path": diff.path}
        if diff.old_value is not MISSING:
            data["old_value"] = diff.old_value
        if diff.new_value is not MISSING:
            data["new_value"] = diff.new_value
        return data
    def dict_to_diff(kind, doc_id, data, *, _make_diff=Diff):
        """Inverse of diff_to_dict; re-targets wrapped "stock state" diffs.

        A dict-shaped path of the form {"stock_id": ..., "path": ...}
        marks a diff that originally belonged to a stock state document;
        its original kind/doc_id are restored before constructing the diff.
        """
        def json_or_none(data, key):
            return json.dumps(data[key]) if key in data else None
        path = data["path"]
        if len(path) == 2 and isinstance(path, dict):
            assert path.keys() == {"stock_id", "path"}, path
            assert path["stock_id"].startswith(doc_id + "/"), (doc_id, path)
            kind = "stock state"
            doc_id = path["stock_id"]
            path = path["path"]
        return _make_diff(
            kind=kind,
            doc_id=doc_id,
            diff_type=data["type"],
            path=json.dumps(path),
            old_value=json_or_none(data, "old_value"),
            new_value=json_or_none(data, "new_value"),
        )
class DocChanges(Base):
    """Like DocDiffs, but each stored diff also carries the change's reason."""
    __tablename__ = 'doc_changes'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    # JSON-encoded list of change dicts
    diffs = Column(Text(), nullable=False)
    def diff_to_dict(diff):
        """Serialize like DocDiffs.diff_to_dict, plus the "reason" field."""
        data = DocDiffs.diff_to_dict(diff)
        data["reason"] = diff.reason
        return data
    def dict_to_diff(kind, doc_id, data):
        """Deserialize a change dict into a ``Change`` instance."""
        def change(**kw):
            # these fields arrive JSON-encoded (or None meaning MISSING)
            for key in ["path", "old_value", "new_value"]:
                kw[key] = MISSING if kw[key] is None else json.loads(kw[key])
            return Change(reason=data["reason"], **kw)
        return DocDiffs.dict_to_diff(kind, doc_id, data, _make_diff=change)
@attr.s
class Change:
    """A recorded document change: a diff plus the reason it was made."""
    kind = attr.ib()
    doc_id = attr.ib()
    reason = attr.ib()
    diff_type = attr.ib()
    path = attr.ib()
    old_value = attr.ib()
    new_value = attr.ib()
    @property
    def json_diff(self):
        # a Change acts as its own "json diff" (duck-types diff rows)
        return self
    def _replace(self, **data):
        """Return a copy with the given fields replaced (namedtuple-style API)."""
        cls = type(self)
        for att in attr.fields(cls):
            if att.name not in data:
                data[att.name] = getattr(self, att.name)
        return cls(**data)
class KeyValue(Base):
    """Generic string key/value store (used for resume state)."""
    __tablename__ = "keyvalue"
    key = Column(String(50), nullable=False, primary_key=True)
    value = Column(Text(), nullable=False)
class MissingDoc(Base):
    """Documents recorded as missing, keyed by (kind, doc_id)."""
    __tablename__ = 'missing_doc'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
class NoActionCaseForm(Base):
    """Form id marker table (presumably forms with no case action; used elsewhere)."""
    __tablename__ = "noactioncaseform"
    id = Column(String(50), nullable=False, primary_key=True)
class ProblemForm(Base):
    """Form id marker table for problem forms; populated outside this chunk."""
    __tablename__ = "problemform"
    id = Column(String(50), nullable=False, primary_key=True)
@attr.s
class Counts:
    """Per-kind summary returned by get_doc_counts."""
    total = attr.ib(default=0)    # number of documents of this kind
    diffs = attr.ib(default=0)    # documents with stored diffs
    missing = attr.ib(default=0)  # documents recorded in MissingDoc
    changes = attr.ib(default=0)  # documents with recorded changes
def iter_large(query, pk_attr, maxrq=1000):
    """Iterate *query* results via keyset pagination on *pk_attr*.

    Fetches at most *maxrq* rows per round trip, resuming each page after
    the last primary-key value seen, so memory stays bounded on large
    result sets.
    """
    last_seen = None
    while True:
        page = query if last_seen is None else query.filter(pk_attr > last_seen)
        row = None
        for row in page.order_by(pk_attr).limit(maxrq):
            yield row
        if row is None:
            # an empty page means the previous one was the last
            return
        last_seen = getattr(row, pk_attr.name)
| true | true |
f7fc4874255b60ca2a2524b6a6b2aafbdb140069 | 54 | py | Python | casepro/statistics/__init__.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | 21 | 2015-07-21T15:57:49.000Z | 2021-11-04T18:26:35.000Z | casepro/statistics/__init__.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | 357 | 2015-05-22T07:26:45.000Z | 2022-03-12T01:08:28.000Z | casepro/statistics/__init__.py | rapidpro/ureport-partners | 16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd | [
"BSD-3-Clause"
] | 24 | 2015-05-28T12:30:25.000Z | 2021-11-19T01:57:38.000Z | default_app_config = "casepro.statistics.apps.Config"
| 27 | 53 | 0.833333 | default_app_config = "casepro.statistics.apps.Config"
| true | true |
f7fc4ad86d567854dbbab3c4b96313f1fc47a00f | 4,041 | py | Python | scripts/use_evaluation_model_distmult.py | wang-yuhao/Practical-Big-Data-Science-ADL-AI | 0bf63bf210f506e287f8492e716bb3394137d74b | [
"MIT"
] | null | null | null | scripts/use_evaluation_model_distmult.py | wang-yuhao/Practical-Big-Data-Science-ADL-AI | 0bf63bf210f506e287f8492e716bb3394137d74b | [
"MIT"
] | null | null | null | scripts/use_evaluation_model_distmult.py | wang-yuhao/Practical-Big-Data-Science-ADL-AI | 0bf63bf210f506e287f8492e716bb3394137d74b | [
"MIT"
] | 1 | 2021-12-24T00:26:26.000Z | 2021-12-24T00:26:26.000Z | import argparse
import pickle
import torch
import os
import numpy as np
from src.models.api import EvaluationModel, NegSampleGenerator
from torch import nn
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class DistMult:
    """DistMult (bilinear-diagonal) scoring function for KG triples."""

    def get_score(self, head: torch.tensor, relation: torch.tensor,
                  tail: torch.tensor, mode: str) -> torch.tensor:
        """
        Score (head, relation, tail) triples as sum(head * relation * tail).

        :param head: float tensor, shape (batch_size, sample_size, entity_dim)
        :param relation: float tensor, shape (batch_size, sample_size,
            relation_dim)
        :param tail: float tensor, shape (batch_size, sample_size, entity_dim)
        :param mode: str ('single', 'head-batch' or 'head-tail')
        :return: float tensor, shape (batch_size, sample_size) — summed
            over the embedding dimension (dim=2)
        """
        if mode == 'head-batch':
            # group relation with tail first so a broadcast head multiplies
            # a single precomputed tensor
            product = (relation * tail) * head
        else:
            product = (head * relation) * tail
        return product.sum(dim=2)
def parse_args(args=None):
    """Parse command-line options: model dir, data dir, optional output path."""
    parser = argparse.ArgumentParser(
        description='Load trained model and use it for predictions'
    )
    for short_flag, long_flag in (('-m', '--model'),
                                  ('-d', '--data'),
                                  ('-o', '--output_path')):
        parser.add_argument(short_flag, long_flag, type=str, default=None)
    return parser.parse_args(args)
def load_data(data_path):
    """Load the train/valid/test triple pickles found in *data_path*.

    :returns: (train, valid, test) in that order.
    """
    splits = []
    for split_name in ("train", "valid", "test"):
        split_path = os.path.join(data_path, split_name + ".pickle")
        with open(split_path, 'rb') as handle:
            splits.append(pickle.load(handle))
    return tuple(splits)
def main(args):
    """
    Load trained model and use it for predictions.

    Builds an EvaluationModel around a DistMult scorer, loads pre-trained
    entity/relation embeddings from ``args.model``, evaluates on the test
    triples (CUDA required) and optionally writes model output for error
    analysis to ``args.output_path``.

    :param args: argparse namespace with ``model``, ``data`` and
        ``output_path`` attributes (see ``parse_args``).
    :raises ValueError: if the model or data path is missing.
    """
    if args.model is None or args.data is None:
        raise ValueError('You have to specify model and data input paths.')
    # load data
    train_triples, valid_triples, test_triples = load_data(args.data)
    # create model and load already trained embeddings; all known true
    # triples are used to build the negative-sampling filter bias
    all_true_triples = np.concatenate([train_triples, valid_triples,
                                       test_triples], axis=0)
    neg_sample_generator = NegSampleGenerator(all_true_triples,
                                              create_filter_bias=True)
    model = EvaluationModel(model_class=DistMult(),
                            neg_sample_generator=neg_sample_generator)
    path = os.path.join(args.model, 'entity_embedding.npy')
    new_entity_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
    path = os.path.join(args.model, 'relation_embedding.npy')
    new_relation_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
    # only True if embeddings of RotatE are used (presumably its entity
    # embeddings are wider than the relation ones — TODO confirm);
    # truncate entities to the relation dimension in that case
    if new_entity_embedding.shape[1] != new_relation_embedding.shape[1]:
        stop = new_relation_embedding.shape[1]
        new_entity_embedding = new_entity_embedding[:, :stop]
        new_entity_embedding = nn.Parameter(new_entity_embedding)
    model.change_entity_embedding(new_entity_embedding.cuda())
    model.change_relation_embedding(new_relation_embedding.cuda())
    model.cuda()
    model.eval()
    # use API to evaluate model and generate model output for error analysis
    s = torch.tensor(test_triples[:, 0]).cuda()
    p = torch.tensor(test_triples[:, 1]).cuda()
    o = torch.tensor(test_triples[:, 2]).cuda()
    evaluation_result = model.evaluate(s, p, o, batch_size=4)
    if args.output_path is not None:
        model.generate_model_output(output_path=args.output_path,
                                    test_triples=test_triples,
                                    evaluation_result=evaluation_result)
if __name__ == '__main__':
    # entry point: parse CLI options and run the evaluation
    main(parse_args())
| 33.675 | 79 | 0.655778 | import argparse
import pickle
import torch
import os
import numpy as np
from src.models.api import EvaluationModel, NegSampleGenerator
from torch import nn
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class DistMult:
def get_score(self, head: torch.tensor, relation: torch.tensor,
tail: torch.tensor, mode: str) -> torch.tensor:
if mode == 'head-batch':
score = head * (relation * tail)
else:
score = (head * relation) * tail
score = score.sum(dim=2)
return score
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Load trained model and use it for predictions'
)
parser.add_argument('-m', '--model', type=str, default=None)
parser.add_argument('-d', '--data', type=str, default=None)
parser.add_argument('-o', '--output_path', type=str, default=None)
return parser.parse_args(args)
def load_data(data_path):
path_train = os.path.join(data_path, 'train.pickle')
with open(path_train, 'rb') as handle:
train = pickle.load(handle)
path_valid = os.path.join(data_path, 'valid.pickle')
with open(path_valid, 'rb') as handle:
valid = pickle.load(handle)
path_test = os.path.join(data_path, 'test.pickle')
with open(path_test, 'rb') as handle:
test = pickle.load(handle)
return train, valid, test
def main(args):
if args.model is None or args.data is None:
raise ValueError('You have to specify model and data input paths.')
train_triples, valid_triples, test_triples = load_data(args.data)
all_true_triples = np.concatenate([train_triples, valid_triples,
test_triples], axis=0)
neg_sample_generator = NegSampleGenerator(all_true_triples,
create_filter_bias=True)
model = EvaluationModel(model_class=DistMult(),
neg_sample_generator=neg_sample_generator)
path = os.path.join(args.model, 'entity_embedding.npy')
new_entity_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
path = os.path.join(args.model, 'relation_embedding.npy')
new_relation_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
if new_entity_embedding.shape[1] != new_relation_embedding.shape[1]:
stop = new_relation_embedding.shape[1]
new_entity_embedding = new_entity_embedding[:, :stop]
new_entity_embedding = nn.Parameter(new_entity_embedding)
model.change_entity_embedding(new_entity_embedding.cuda())
model.change_relation_embedding(new_relation_embedding.cuda())
model.cuda()
model.eval()
s = torch.tensor(test_triples[:, 0]).cuda()
p = torch.tensor(test_triples[:, 1]).cuda()
o = torch.tensor(test_triples[:, 2]).cuda()
evaluation_result = model.evaluate(s, p, o, batch_size=4)
if args.output_path is not None:
model.generate_model_output(output_path=args.output_path,
test_triples=test_triples,
evaluation_result=evaluation_result)
if __name__ == '__main__':
main(parse_args())
| true | true |
f7fc4b3659b6971196dde3e9411032bf94ac77e3 | 4,604 | py | Python | lanedet/engine/runner.py | zhangzhongshuai/lanedet | bff96fcbed122ac0f876d8e64ada7795ca34e4b6 | [
"Apache-2.0"
] | null | null | null | lanedet/engine/runner.py | zhangzhongshuai/lanedet | bff96fcbed122ac0f876d8e64ada7795ca34e4b6 | [
"Apache-2.0"
] | null | null | null | lanedet/engine/runner.py | zhangzhongshuai/lanedet | bff96fcbed122ac0f876d8e64ada7795ca34e4b6 | [
"Apache-2.0"
] | 1 | 2022-02-02T10:33:10.000Z | 2022-02-02T10:33:10.000Z | import time
import torch
from tqdm import tqdm
import pytorch_warmup as warmup
import numpy as np
import random
import cv2
from lanedet.models.registry import build_net
from .registry import build_trainer, build_evaluator
from .optimizer import build_optimizer
from .scheduler import build_scheduler
from lanedet.datasets import build_dataloader
from lanedet.utils.recorder import build_recorder
from lanedet.utils.net_utils import save_model, load_network
class Runner(object):
    """Training/validation driver for lane-detection models.

    Builds the network, optimizer, scheduler and data loaders from *cfg*,
    runs the epoch loop, checkpoints periodically and tracks the best
    validation metric.
    """

    def __init__(self, cfg):
        # seed all RNG sources for reproducibility
        torch.manual_seed(cfg.seed)
        np.random.seed(cfg.seed)
        random.seed(cfg.seed)
        self.cfg = cfg
        self.recorder = build_recorder(self.cfg)
        self.net = build_net(self.cfg)
        # self.net.to(torch.device('cuda'))
        self.net = torch.nn.parallel.DataParallel(
                self.net, device_ids = range(self.cfg.gpus)).cuda()
        self.recorder.logger.info('Network: \n' + str(self.net))
        self.resume()
        self.optimizer = build_optimizer(self.cfg, self.net)
        self.scheduler = build_scheduler(self.cfg, self.optimizer)
        self.warmup_scheduler = None
        # TODO(zhengtu): remove this hard code
        if self.cfg.optimizer.type == 'SGD':
            self.warmup_scheduler = warmup.LinearWarmup(
                self.optimizer, warmup_period=5000)
        self.metric = 0.  # best validation metric seen so far
        self.val_loader = None  # built lazily on first validate()

    def resume(self):
        """Load weights from cfg.load_from / cfg.finetune_from, if given."""
        if not self.cfg.load_from and not self.cfg.finetune_from:
            return
        load_network(self.net, self.cfg.load_from,
                finetune_from=self.cfg.finetune_from, logger=self.recorder.logger)

    def to_cuda(self, batch):
        """Move every entry of *batch* to GPU in place ('meta' is left as-is)."""
        for k in batch:
            if k == 'meta':
                continue
            batch[k] = batch[k].cuda()
        return batch

    def train_epoch(self, epoch, train_loader):
        """Run one epoch of optimization, logging every cfg.log_interval steps."""
        self.net.train()
        end = time.time()
        max_iter = len(train_loader)
        for i, data in enumerate(train_loader):
            if self.recorder.step >= self.cfg.total_iter:
                break
            date_time = time.time() - end  # data-loading time for this batch
            self.recorder.step += 1
            data = self.to_cuda(data)
            output = self.net(data)
            self.optimizer.zero_grad()
            loss = output['loss']
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            if self.warmup_scheduler:
                self.warmup_scheduler.dampen()
            batch_time = time.time() - end
            end = time.time()
            self.recorder.update_loss_stats(output['loss_stats'])
            self.recorder.batch_time.update(batch_time)
            self.recorder.data_time.update(date_time)
            if i % self.cfg.log_interval == 0 or i == max_iter - 1:
                lr = self.optimizer.param_groups[0]['lr']
                self.recorder.lr = lr
                self.recorder.record('train')

    def train(self):
        """Main loop: train cfg.epochs epochs, checkpoint/validate periodically."""
        self.recorder.logger.info('Build train loader...')
        train_loader = build_dataloader(self.cfg.dataset.train, self.cfg, is_train=True)
        self.recorder.logger.info('Start training...')
        for epoch in range(self.cfg.epochs):
            self.recorder.epoch = epoch
            self.train_epoch(epoch, train_loader)
            if (epoch + 1) % self.cfg.save_ep == 0 or epoch == self.cfg.epochs - 1:
                self.save_ckpt()
            if (epoch + 1) % self.cfg.eval_ep == 0 or epoch == self.cfg.epochs - 1:
                self.validate()
            if self.recorder.step >= self.cfg.total_iter:
                break

    def validate(self):
        """Evaluate on the validation set; checkpoint when the metric improves."""
        if not self.val_loader:
            self.val_loader = build_dataloader(self.cfg.dataset.val, self.cfg, is_train=False)
        self.net.eval()
        predictions = []
        for i, data in enumerate(tqdm(self.val_loader, desc=f'Validate')):
            data = self.to_cuda(data)
            with torch.no_grad():
                output = self.net(data)
                predictions.extend(output)
            if self.cfg.view:
                self.val_loader.dataset.view(predictions, data['meta'])
        out = self.val_loader.dataset.evaluate(predictions, self.cfg.work_dir)
        self.recorder.logger.info(out)
        metric = out
        if metric > self.metric:
            self.metric = metric
            self.save_ckpt(is_best=True)
        self.recorder.logger.info('Best metric: ' + str(self.metric))

    def save_ckpt(self, is_best=False):
        """Persist model/optimizer/scheduler state via net_utils.save_model."""
        save_model(self.net, self.optimizer, self.scheduler,
                self.recorder, is_best)
| 37.737705 | 94 | 0.60404 | import time
import torch
from tqdm import tqdm
import pytorch_warmup as warmup
import numpy as np
import random
import cv2
from lanedet.models.registry import build_net
from .registry import build_trainer, build_evaluator
from .optimizer import build_optimizer
from .scheduler import build_scheduler
from lanedet.datasets import build_dataloader
from lanedet.utils.recorder import build_recorder
from lanedet.utils.net_utils import save_model, load_network
class Runner(object):
def __init__(self, cfg):
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
self.cfg = cfg
self.recorder = build_recorder(self.cfg)
self.net = build_net(self.cfg)
self.net = torch.nn.parallel.DataParallel(
self.net, device_ids = range(self.cfg.gpus)).cuda()
self.recorder.logger.info('Network: \n' + str(self.net))
self.resume()
self.optimizer = build_optimizer(self.cfg, self.net)
self.scheduler = build_scheduler(self.cfg, self.optimizer)
self.warmup_scheduler = None
if self.cfg.optimizer.type == 'SGD':
self.warmup_scheduler = warmup.LinearWarmup(
self.optimizer, warmup_period=5000)
self.metric = 0.
self.val_loader = None
def resume(self):
if not self.cfg.load_from and not self.cfg.finetune_from:
return
load_network(self.net, self.cfg.load_from,
finetune_from=self.cfg.finetune_from, logger=self.recorder.logger)
def to_cuda(self, batch):
for k in batch:
if k == 'meta':
continue
batch[k] = batch[k].cuda()
return batch
def train_epoch(self, epoch, train_loader):
self.net.train()
end = time.time()
max_iter = len(train_loader)
for i, data in enumerate(train_loader):
if self.recorder.step >= self.cfg.total_iter:
break
date_time = time.time() - end
self.recorder.step += 1
data = self.to_cuda(data)
output = self.net(data)
self.optimizer.zero_grad()
loss = output['loss']
loss.backward()
self.optimizer.step()
self.scheduler.step()
if self.warmup_scheduler:
self.warmup_scheduler.dampen()
batch_time = time.time() - end
end = time.time()
self.recorder.update_loss_stats(output['loss_stats'])
self.recorder.batch_time.update(batch_time)
self.recorder.data_time.update(date_time)
if i % self.cfg.log_interval == 0 or i == max_iter - 1:
lr = self.optimizer.param_groups[0]['lr']
self.recorder.lr = lr
self.recorder.record('train')
def train(self):
self.recorder.logger.info('Build train loader...')
train_loader = build_dataloader(self.cfg.dataset.train, self.cfg, is_train=True)
self.recorder.logger.info('Start training...')
for epoch in range(self.cfg.epochs):
self.recorder.epoch = epoch
self.train_epoch(epoch, train_loader)
if (epoch + 1) % self.cfg.save_ep == 0 or epoch == self.cfg.epochs - 1:
self.save_ckpt()
if (epoch + 1) % self.cfg.eval_ep == 0 or epoch == self.cfg.epochs - 1:
self.validate()
if self.recorder.step >= self.cfg.total_iter:
break
def validate(self):
if not self.val_loader:
self.val_loader = build_dataloader(self.cfg.dataset.val, self.cfg, is_train=False)
self.net.eval()
predictions = []
for i, data in enumerate(tqdm(self.val_loader, desc=f'Validate')):
data = self.to_cuda(data)
with torch.no_grad():
output = self.net(data)
predictions.extend(output)
if self.cfg.view:
self.val_loader.dataset.view(predictions, data['meta'])
out = self.val_loader.dataset.evaluate(predictions, self.cfg.work_dir)
self.recorder.logger.info(out)
metric = out
if metric > self.metric:
self.metric = metric
self.save_ckpt(is_best=True)
self.recorder.logger.info('Best metric: ' + str(self.metric))
def save_ckpt(self, is_best=False):
save_model(self.net, self.optimizer, self.scheduler,
self.recorder, is_best)
| true | true |
f7fc4b3e9a8c1b9f418fcf6693153927305cbf34 | 6,803 | py | Python | statistics/mySQLvsPostgreSQLall.36.py | biosoda/bioquery | de80b6a06f079d3383a6b159151043d1f2b77d52 | [
"CC0-1.0"
] | 1 | 2019-07-01T01:36:28.000Z | 2019-07-01T01:36:28.000Z | statistics/mySQLvsPostgreSQLall.36.py | biosoda/bioquery | de80b6a06f079d3383a6b159151043d1f2b77d52 | [
"CC0-1.0"
] | 1 | 2020-04-30T01:09:38.000Z | 2020-04-30T01:09:38.000Z | statistics/mySQLvsPostgreSQLall.36.py | biosoda/bioquery | de80b6a06f079d3383a6b159151043d1f2b77d52 | [
"CC0-1.0"
] | 1 | 2021-11-02T17:08:21.000Z | 2021-11-02T17:08:21.000Z | import requests, os, re, json, sys
import argparse
import urllib.parse
from pprint import pprint
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--loops')
parser.add_argument('--outputfile')
parser.add_argument('--inputfile')
args = parser.parse_args()
url0 = 'http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight'
url1_title = "mySQL"
url1 = "http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight_mysql"
url2_title = "postgreSQL"
url2 = "http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight_postgres"
header_title = "header"
header = "header"
intermed = "?query="
restofurl = "&format=JSON&limit=100&offset=0&inference=false"
# the separator we use in our json
separator = '$$'
# add inner limits? leave an empty string if no limits are wanted
addlimits = 'LIMIT 10'
# how many requests should be done:
loops = 3
if None != args.loops:
loops = int(args.loops)
# verbose means, that the logfile gets verbose. console is always verbose
verbose = 0
# open statistics file
outputfile = "statisticsall"
if None != args.outputfile:
outputfile = args.outputfile
outputfile = outputfile + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
f = open(outputfile, "w")
f.close()
# pstring: what to print
# thiverbose: should the string be printed into logfile also
# newline: what is to be printed after the string (typically newline)
# printbefore: what is to be printed before string
def printwrite(pstring, thisverbose = 1, newline = "\n", printbefore = ""):
f = open("statisticsall", "a")
tmpstr = str(pstring)
print(tmpstr)
if thisverbose == 1:
f.write(printbefore + tmpstr + newline)
f.close();
# where to get the .rq files from
targetfile = '../biosoda_frontend/src/biosodadata.json'
if None != args.inputfile:
targetfile = args.inputfile
# on the server we deliver the json file next to the script:
# targetfile = 'biosodadata.json'
# initialise statistics variable
sums = {}
printwrite('LIMITS: ' + addlimits)
printwrite('url0, 1, 2: ' + url0 + ', ' + url1 + ' (' + url1_title + '), ' + url2 + ' (' + url2_title + ')')
printwrite('Loops: ' + str(loops))
with open(targetfile) as jsondata:
data = json.load(jsondata)
for onequery in data['questions']:
printwrite("===============", 1, "\n", "\n")
queryid = onequery['id']
printwrite(queryid)
if not queryid == 'uniprotQ3':
nodebug = 1
# continue # debug only
if not 'SPARQL' in onequery:
printwrite('no query')
continue
# prepare statistics
sums[queryid] = {}
sums[queryid]['header'] = {}
sums[queryid]['url1'] = {}
sums[queryid]['url2'] = {}
sums[queryid]['header']['total'] = 'total'
sums[queryid]['header']['avg'] = 'avg'
sums[queryid]['url1']['total'] = 0
sums[queryid]['url1']['avg'] = 0
sums[queryid]['url2']['total'] = 0
sums[queryid]['url2']['avg'] = 0
printwrite("===============")
printwrite(onequery['SPARQL'], verbose)
tmpSPARQL = onequery['SPARQL']
tmpfetchurl = onequery['fetchUrl']
if (not tmpSPARQL.find('bgeelight') and not tmpfetchurl.find('bgeelight')):
printwrite('no bgee involved')
continue
tmpQuestion = onequery['question']
for onevariable in onequery['vars']:
tmptoken = separator + onevariable['name'] + separator
if 'defaultvalue' in onevariable:
tmpSPARQL = tmpSPARQL.replace(tmptoken, onevariable['defaultvalue'])
else:
tmpSPARQL = tmpSPARQL.replace(tmptoken, onevariable['default'])
tmpQuestion = tmpQuestion.replace(tmptoken, onevariable['default'])
tmpSPARQL = tmpSPARQL.replace(separator + 'innerlimit' + separator, ' ' + addlimits + ' ')
for x in range(loops):
# loop over different servers
for oneurl in ['url1', 'url2']:
# prepare URL
tmpThisSPARQL = tmpSPARQL.replace(url0, eval(oneurl))
tmpFetchURL = onequery['fetchUrlShort'].replace(url0, eval(oneurl))
printwrite("===============", verbose)
printwrite(tmpThisSPARQL, verbose)
tmpThisSPARQL = re.sub('#[^>\n]+\n', '\n', tmpThisSPARQL)
printwrite("===============", verbose)
printwrite(tmpThisSPARQL, verbose)
tmpThisSPARQL = tmpThisSPARQL.replace('\n', ' ')
tmpThisSPARQL = tmpThisSPARQL.replace('\t', ' ')
tmpThisSPARQL = urllib.parse.quote_plus(tmpThisSPARQL)
tmpurl = tmpFetchURL + intermed + tmpThisSPARQL + restofurl
printwrite("===============", verbose)
printwrite(tmpurl, verbose)
# fetch result
tmpres = requests.get(tmpurl, headers={'Cache-Control': 'no-cache'})
printwrite("===============", verbose)
printwrite(tmpres.text, verbose)
tmptime = tmpres.elapsed.total_seconds()
sums[queryid][oneurl]['total'] = sums[queryid][oneurl]['total'] + tmptime
sums[queryid]['header']['p' + str(x)] = x
sums[queryid][oneurl]['p' + str(x)] = str(tmptime)
printwrite("===============", verbose)
printwrite("\n", verbose)
# calculate average
sums[queryid][oneurl]['avg'] = sums[queryid][oneurl]['total']/loops
# write single statistic to console
printwrite("===============", 0, "\n", "\n")
printwrite("===============", 0, "\n", "\n")
printwrite("statistics")
printwrite("===============")
printwrite(str(loops) + " tries")
for qnum, qnumdata in sums[queryid].items():
printwrite("", 1)
printwrite(eval(qnum + '_title'), 1, ",")
for point, pointdata in qnumdata.items():
printwrite(pointdata, 1, ",")
printwrite("", 1)
# break # used for debugging
# full statistic at the end
printwrite("===============")
printwrite("fullstat")
printwrite("===============")
for queryid, filedata in sums.items():
printwrite(queryid)
for qnum, qnumdata in sums[queryid].items():
printwrite(eval(qnum + '_title'), 1, ",")
for point, pointdata in qnumdata.items():
printwrite(pointdata, 1, ",")
printwrite("", 1)
printwrite('### this statistics file ist created by bioSODA mySQLvsPostgreSQLall.py (https://github.com/biosoda/bioquery/tree/master/statistics) - created at ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
| 35.617801 | 216 | 0.588123 | import requests, os, re, json, sys
import argparse
import urllib.parse
from pprint import pprint
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--loops')
parser.add_argument('--outputfile')
parser.add_argument('--inputfile')
args = parser.parse_args()
url0 = 'http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight'
url1_title = "mySQL"
url1 = "http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight_mysql"
url2_title = "postgreSQL"
url2 = "http://biosoda.expasy.org:8080/rdf4j-server/repositories/bgeelight_postgres"
header_title = "header"
header = "header"
intermed = "?query="
restofurl = "&format=JSON&limit=100&offset=0&inference=false"
separator = '$$'
addlimits = 'LIMIT 10'
loops = 3
if None != args.loops:
loops = int(args.loops)
verbose = 0
outputfile = "statisticsall"
if None != args.outputfile:
outputfile = args.outputfile
outputfile = outputfile + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
f = open(outputfile, "w")
f.close()
def printwrite(pstring, thisverbose = 1, newline = "\n", printbefore = ""):
f = open("statisticsall", "a")
tmpstr = str(pstring)
print(tmpstr)
if thisverbose == 1:
f.write(printbefore + tmpstr + newline)
f.close();
targetfile = '../biosoda_frontend/src/biosodadata.json'
if None != args.inputfile:
targetfile = args.inputfile
sums = {}
printwrite('LIMITS: ' + addlimits)
printwrite('url0, 1, 2: ' + url0 + ', ' + url1 + ' (' + url1_title + '), ' + url2 + ' (' + url2_title + ')')
printwrite('Loops: ' + str(loops))
with open(targetfile) as jsondata:
data = json.load(jsondata)
for onequery in data['questions']:
printwrite("===============", 1, "\n", "\n")
queryid = onequery['id']
printwrite(queryid)
if not queryid == 'uniprotQ3':
nodebug = 1
not 'SPARQL' in onequery:
printwrite('no query')
continue
sums[queryid] = {}
sums[queryid]['header'] = {}
sums[queryid]['url1'] = {}
sums[queryid]['url2'] = {}
sums[queryid]['header']['total'] = 'total'
sums[queryid]['header']['avg'] = 'avg'
sums[queryid]['url1']['total'] = 0
sums[queryid]['url1']['avg'] = 0
sums[queryid]['url2']['total'] = 0
sums[queryid]['url2']['avg'] = 0
printwrite("===============")
printwrite(onequery['SPARQL'], verbose)
tmpSPARQL = onequery['SPARQL']
tmpfetchurl = onequery['fetchUrl']
if (not tmpSPARQL.find('bgeelight') and not tmpfetchurl.find('bgeelight')):
printwrite('no bgee involved')
continue
tmpQuestion = onequery['question']
for onevariable in onequery['vars']:
tmptoken = separator + onevariable['name'] + separator
if 'defaultvalue' in onevariable:
tmpSPARQL = tmpSPARQL.replace(tmptoken, onevariable['defaultvalue'])
else:
tmpSPARQL = tmpSPARQL.replace(tmptoken, onevariable['default'])
tmpQuestion = tmpQuestion.replace(tmptoken, onevariable['default'])
tmpSPARQL = tmpSPARQL.replace(separator + 'innerlimit' + separator, ' ' + addlimits + ' ')
for x in range(loops):
for oneurl in ['url1', 'url2']:
tmpThisSPARQL = tmpSPARQL.replace(url0, eval(oneurl))
tmpFetchURL = onequery['fetchUrlShort'].replace(url0, eval(oneurl))
printwrite("===============", verbose)
printwrite(tmpThisSPARQL, verbose)
tmpThisSPARQL = re.sub('#[^>\n]+\n', '\n', tmpThisSPARQL)
printwrite("===============", verbose)
printwrite(tmpThisSPARQL, verbose)
tmpThisSPARQL = tmpThisSPARQL.replace('\n', ' ')
tmpThisSPARQL = tmpThisSPARQL.replace('\t', ' ')
tmpThisSPARQL = urllib.parse.quote_plus(tmpThisSPARQL)
tmpurl = tmpFetchURL + intermed + tmpThisSPARQL + restofurl
printwrite("===============", verbose)
printwrite(tmpurl, verbose)
tmpres = requests.get(tmpurl, headers={'Cache-Control': 'no-cache'})
printwrite("===============", verbose)
printwrite(tmpres.text, verbose)
tmptime = tmpres.elapsed.total_seconds()
sums[queryid][oneurl]['total'] = sums[queryid][oneurl]['total'] + tmptime
sums[queryid]['header']['p' + str(x)] = x
sums[queryid][oneurl]['p' + str(x)] = str(tmptime)
printwrite("===============", verbose)
printwrite("\n", verbose)
sums[queryid][oneurl]['avg'] = sums[queryid][oneurl]['total']/loops
printwrite("===============", 0, "\n", "\n")
printwrite("===============", 0, "\n", "\n")
printwrite("statistics")
printwrite("===============")
printwrite(str(loops) + " tries")
for qnum, qnumdata in sums[queryid].items():
printwrite("", 1)
printwrite(eval(qnum + '_title'), 1, ",")
for point, pointdata in qnumdata.items():
printwrite(pointdata, 1, ",")
printwrite("", 1)
==========")
printwrite("fullstat")
printwrite("===============")
for queryid, filedata in sums.items():
printwrite(queryid)
for qnum, qnumdata in sums[queryid].items():
printwrite(eval(qnum + '_title'), 1, ",")
for point, pointdata in qnumdata.items():
printwrite(pointdata, 1, ",")
printwrite("", 1)
printwrite('### this statistics file ist created by bioSODA mySQLvsPostgreSQLall.py (https://github.com/biosoda/bioquery/tree/master/statistics) - created at ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
| true | true |
f7fc4b9fe018ec1e58e9615fa9df7e1ec204ebfb | 554 | py | Python | PythonEx.py | microsoft/MSRC-Microsoft-Engage-API | b7e8c06388dcfaa5ebcf7911a8578dd0e81f8b21 | [
"MIT"
] | 9 | 2019-06-18T01:51:32.000Z | 2021-07-26T17:40:30.000Z | PythonEx.py | microsoft/MSRC-Microsoft-Engage-API | b7e8c06388dcfaa5ebcf7911a8578dd0e81f8b21 | [
"MIT"
] | 3 | 2020-04-22T20:31:38.000Z | 2021-02-19T23:00:49.000Z | PythonEx.py | microsoft/MSRC-Microsoft-Engage-API | b7e8c06388dcfaa5ebcf7911a8578dd0e81f8b21 | [
"MIT"
] | 9 | 2019-05-22T01:14:42.000Z | 2021-06-08T19:10:23.000Z | import http.client, urllib.request, urllib.parse, urllib.error, base64
headers = {
'Content-Type': 'application/json'
}
params = urllib.parse.urlencode({
})
try:
conn = http.client.HTTPSConnection('https://api.msrc.microsoft.com')
# Data model documentation at https://msrc.microsoft.com/report/developer
str = "{}"
conn.request("POST", "/report/v2.0/abuse?%s" % params, str, headers)
response = conn.getresponse()
data = response.read()
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
| 25.181818 | 75 | 0.685921 | import http.client, urllib.request, urllib.parse, urllib.error, base64
headers = {
'Content-Type': 'application/json'
}
params = urllib.parse.urlencode({
})
try:
conn = http.client.HTTPSConnection('https://api.msrc.microsoft.com')
str = "{}"
conn.request("POST", "/report/v2.0/abuse?%s" % params, str, headers)
response = conn.getresponse()
data = response.read()
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
| true | true |
f7fc4c4ee59cae38919336f439189961e59371b2 | 2,519 | py | Python | tests/unit/resources/scales/test_log_scale.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 4 | 2021-01-12T17:03:37.000Z | 2021-12-16T13:23:57.000Z | tests/unit/resources/scales/test_log_scale.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 61 | 2021-01-11T05:27:16.000Z | 2022-03-08T01:50:09.000Z | tests/unit/resources/scales/test_log_scale.py | primitybio/cellengine-python-toolkit | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | null | null | null | import pytest
from math import isclose
from numpy import log10
from pandas import Series
from cellengine.utils.scale_utils import apply_scale
@pytest.fixture(scope="module")
def scale():
return {"minimum": 5, "maximum": 10, "type": "LogScale"}
def test_should_apply_scale(scale):
# fmt: off
input = Series([
-20, 0, 1e-40, 0.01, 0.2, 0.5, 0.9999, 1, 1.00001,
2, 5, 10, 100, 250, 500, 1000, 5000, 10000, 50000,
5e5, 5e6, 5e7, 5e8, 5e9, 5e10, 5e11, 5e12, 5e13, 5e14,
5e15, 5e16, 5e17
])
# fmt: on
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, False))
# fmt: off
expected = Series([
0, 0, 0, 0, 0, 0, 0, 0, 0.00000434292310445319, 0.30102999566398114,
0.6989700043360186, 1, 2, 2.397940008672037, 2.6989700043360183, 3,
3.6989700043360187, 4, 4.698970004336018, 5.698970004336018,
6.698970004336018, 7.698970004336018, 8.698970004336018,
9.698970004336018, 10.698970004336018, 11.698970004336018,
12.698970004336018, 13.698970004336018, 14.698970004336018,
15.698970004336018, 16.698970004336018, 17.698970004336018,
])
# fmt: on
assert [isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected)]
def test_should_apply_clamped(scale):
# fmt: off
input = Series([
-20, 0, 0.01, 0.2, 0.5, 1, 2, 5, 10,
100, 250, 500, 1000, 5000, 10000, 50000
])
# fmt: on
output = Series([], dtype="float64")
MINV = 0.6989700043360186
MAXV = 1
output = input.map(lambda a: apply_scale(scale, a, True))
# fmt: off
expected = Series([
MINV, MINV, MINV, MINV, MINV, MINV, MINV,
0.6989700043360186, 1, MAXV, MAXV, MAXV,
MAXV, MAXV, MAXV, MAXV,
])
# fmt: on
assert [isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected)]
def test_should_handle_0_length_arrays(scale):
input = Series([], dtype="float64")
output = Series([], dtype="float64")
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
assert type(output) is Series
assert output.size == 0
def test_correctly_applies_scale_of_length_n(scale):
for n in range(1, 32):
input = Series([1] * n)
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
for i in range(0, n):
assert isclose(output[i], log10(scale["minimum"]), rel_tol=0.00001)
| 33.144737 | 79 | 0.627233 | import pytest
from math import isclose
from numpy import log10
from pandas import Series
from cellengine.utils.scale_utils import apply_scale
@pytest.fixture(scope="module")
def scale():
return {"minimum": 5, "maximum": 10, "type": "LogScale"}
def test_should_apply_scale(scale):
input = Series([
-20, 0, 1e-40, 0.01, 0.2, 0.5, 0.9999, 1, 1.00001,
2, 5, 10, 100, 250, 500, 1000, 5000, 10000, 50000,
5e5, 5e6, 5e7, 5e8, 5e9, 5e10, 5e11, 5e12, 5e13, 5e14,
5e15, 5e16, 5e17
])
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, False))
expected = Series([
0, 0, 0, 0, 0, 0, 0, 0, 0.00000434292310445319, 0.30102999566398114,
0.6989700043360186, 1, 2, 2.397940008672037, 2.6989700043360183, 3,
3.6989700043360187, 4, 4.698970004336018, 5.698970004336018,
6.698970004336018, 7.698970004336018, 8.698970004336018,
9.698970004336018, 10.698970004336018, 11.698970004336018,
12.698970004336018, 13.698970004336018, 14.698970004336018,
15.698970004336018, 16.698970004336018, 17.698970004336018,
])
assert [isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected)]
def test_should_apply_clamped(scale):
input = Series([
-20, 0, 0.01, 0.2, 0.5, 1, 2, 5, 10,
100, 250, 500, 1000, 5000, 10000, 50000
])
output = Series([], dtype="float64")
MINV = 0.6989700043360186
MAXV = 1
output = input.map(lambda a: apply_scale(scale, a, True))
expected = Series([
MINV, MINV, MINV, MINV, MINV, MINV, MINV,
0.6989700043360186, 1, MAXV, MAXV, MAXV,
MAXV, MAXV, MAXV, MAXV,
])
assert [isclose(a, b, rel_tol=0.00001) for a, b in zip(output, expected)]
def test_should_handle_0_length_arrays(scale):
input = Series([], dtype="float64")
output = Series([], dtype="float64")
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
assert type(output) is Series
assert output.size == 0
def test_correctly_applies_scale_of_length_n(scale):
for n in range(1, 32):
input = Series([1] * n)
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
for i in range(0, n):
assert isclose(output[i], log10(scale["minimum"]), rel_tol=0.00001)
| true | true |
f7fc4cec09dd5d7696ea7e3c67f53b28631935e4 | 538 | py | Python | setup.py | neal-o-r/-ireamh-n | a7cdb55dfbf3e871b17f2621f536f0f04ac5ed57 | [
"MIT"
] | 36 | 2017-07-05T05:47:41.000Z | 2021-05-19T22:58:27.000Z | setup.py | neal-o-r/-ireamh-n | a7cdb55dfbf3e871b17f2621f536f0f04ac5ed57 | [
"MIT"
] | 1 | 2017-10-18T09:13:04.000Z | 2017-10-18T15:11:06.000Z | setup.py | neal-o-r/-ireamh-n | a7cdb55dfbf3e871b17f2621f536f0f04ac5ed57 | [
"MIT"
] | 6 | 2017-07-08T03:46:41.000Z | 2019-10-25T13:12:11.000Z | from distutils.core import setup
setup(
name = 'aireamhan',
packages = ['aireamhan'], # this must be the same as the name above
version = '1.4',
description = 'Teanga Ríomhchlárúchain as Gaeilge',
author = 'Neal Ó Riain',
author_email = 'neal@n-o-r.xyz',
url = 'https://github.com/neal-o-r/aireamhan', # use the URL to the github repo
download_url = 'https://github.com/neal-o-r/aireamhan/archive/1.4.tar.gz',
keywords = ['Irish', 'language', 'programming language'],
classifiers = [],
scripts=['bin/áireamhán']
)
| 35.866667 | 81 | 0.67658 | from distutils.core import setup
setup(
name = 'aireamhan',
packages = ['aireamhan'],
version = '1.4',
description = 'Teanga Ríomhchlárúchain as Gaeilge',
author = 'Neal Ó Riain',
author_email = 'neal@n-o-r.xyz',
url = 'https://github.com/neal-o-r/aireamhan',
download_url = 'https://github.com/neal-o-r/aireamhan/archive/1.4.tar.gz',
keywords = ['Irish', 'language', 'programming language'],
classifiers = [],
scripts=['bin/áireamhán']
)
| true | true |
f7fc4d2c32f0a20cbdfd1104ad91a5665b4b7810 | 4,095 | py | Python | tests/Tester.py | FelixKleineBoesing/FeatureSelector | b33454be39d53881b1c1b5b7b6dca8d782cabd36 | [
"MIT"
] | null | null | null | tests/Tester.py | FelixKleineBoesing/FeatureSelector | b33454be39d53881b1c1b5b7b6dca8d782cabd36 | [
"MIT"
] | 6 | 2019-02-25T08:09:48.000Z | 2019-02-25T08:11:55.000Z | tests/Tester.py | FelixKleineBoesing/pyFeatSel | b33454be39d53881b1c1b5b7b6dca8d782cabd36 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import logging
import time
from pyFeatSel.Models.Model import XGBoostModel
from pyFeatSel.FeatureSelectors.CompleteFeatureSpace import CompleteFeatureSpace
from pyFeatSel.FeatureSelectors.GreedySearch import GreedySearch
from pyFeatSel.Evaluator.Evaluator import Accuracy
from pyFeatSel.misc.Helpers import threshold_base
class Tester:
def run_complete_feature_space(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
xgb_model = XGBoostModel(n_rounds=100, xgb_params={"eta": 0.3})
comp_feat_selector = CompleteFeatureSpace(model=xgb_model, train_data=train_data,
train_label=train_label, k_folds=5,
evaluator=Accuracy(), maximize_measure=True,
objective="classification", threshold_func=threshold_base)
comp_feat_selector.run_selecting()
logging.info("Used time in seconds: {0}, got test (val) measure: {1} ({2})".
format(str(int(time.time()-start)),comp_feat_selector.best_result["measure"]["test"],
comp_feat_selector.best_result["measure"]["val"]))
def run_greedy_search(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
xgb_model = XGBoostModel(n_rounds=100, xgb_params={"eta": 0.3})
comp_feat_selector = GreedySearch(model=xgb_model, train_data=train_data,
train_label=train_label, k_folds=10,
evaluator=Accuracy(), maximize_measure=True,
objective="classification", threshold_func=threshold_base)
comp_feat_selector.run_selecting()
logging.info(
"Used time in seconds: {0}, got test (val) measure: {1} ({2})".format(str(int(time.time() - start)),
comp_feat_selector.best_result[
"measure"]["test"],
comp_feat_selector.best_result[
"measure"]["val"]))
def run_greedy_search2(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
comp_feat_selector = GreedySearch(train_data=train_data, train_label=train_label, k_folds=10,
objective="classification")
comp_feat_selector.run_selecting()
logging.info(
"Used time in seconds: {0}, got test (val) measure: {1} ({2})".format(str(int(time.time() - start)),
comp_feat_selector.best_result[
"measure"]["test"],
comp_feat_selector.best_result[
"measure"]["val"]))
def read_files(self):
train_data = pd.concat([pd.read_csv("../data/train_data.csv"), pd.read_csv("../data/validation_data.csv")])
with open("../data/train_label.csv") as f:
train_label = f.readlines()[1:]
with open("../data/validation_label.csv") as f:
train_label += f.readlines()[1:]
train_label = np.char.replace(np.array(train_label), "\n", "").astype(int)
return train_data, train_label
if __name__=="__main__":
tester = Tester()
tester.run_complete_feature_space()
tester.run_greedy_search()
tester.run_greedy_search2() | 51.835443 | 115 | 0.529915 | import pandas as pd
import numpy as np
import logging
import time
from pyFeatSel.Models.Model import XGBoostModel
from pyFeatSel.FeatureSelectors.CompleteFeatureSpace import CompleteFeatureSpace
from pyFeatSel.FeatureSelectors.GreedySearch import GreedySearch
from pyFeatSel.Evaluator.Evaluator import Accuracy
from pyFeatSel.misc.Helpers import threshold_base
class Tester:
def run_complete_feature_space(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
xgb_model = XGBoostModel(n_rounds=100, xgb_params={"eta": 0.3})
comp_feat_selector = CompleteFeatureSpace(model=xgb_model, train_data=train_data,
train_label=train_label, k_folds=5,
evaluator=Accuracy(), maximize_measure=True,
objective="classification", threshold_func=threshold_base)
comp_feat_selector.run_selecting()
logging.info("Used time in seconds: {0}, got test (val) measure: {1} ({2})".
format(str(int(time.time()-start)),comp_feat_selector.best_result["measure"]["test"],
comp_feat_selector.best_result["measure"]["val"]))
def run_greedy_search(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
xgb_model = XGBoostModel(n_rounds=100, xgb_params={"eta": 0.3})
comp_feat_selector = GreedySearch(model=xgb_model, train_data=train_data,
train_label=train_label, k_folds=10,
evaluator=Accuracy(), maximize_measure=True,
objective="classification", threshold_func=threshold_base)
comp_feat_selector.run_selecting()
logging.info(
"Used time in seconds: {0}, got test (val) measure: {1} ({2})".format(str(int(time.time() - start)),
comp_feat_selector.best_result[
"measure"]["test"],
comp_feat_selector.best_result[
"measure"]["val"]))
def run_greedy_search2(self):
start = time.time()
logging.getLogger().setLevel(logging.INFO)
train_data, train_label = self.read_files()
comp_feat_selector = GreedySearch(train_data=train_data, train_label=train_label, k_folds=10,
objective="classification")
comp_feat_selector.run_selecting()
logging.info(
"Used time in seconds: {0}, got test (val) measure: {1} ({2})".format(str(int(time.time() - start)),
comp_feat_selector.best_result[
"measure"]["test"],
comp_feat_selector.best_result[
"measure"]["val"]))
def read_files(self):
train_data = pd.concat([pd.read_csv("../data/train_data.csv"), pd.read_csv("../data/validation_data.csv")])
with open("../data/train_label.csv") as f:
train_label = f.readlines()[1:]
with open("../data/validation_label.csv") as f:
train_label += f.readlines()[1:]
train_label = np.char.replace(np.array(train_label), "\n", "").astype(int)
return train_data, train_label
if __name__=="__main__":
tester = Tester()
tester.run_complete_feature_space()
tester.run_greedy_search()
tester.run_greedy_search2() | true | true |
f7fc4e2f3208e5c193c965cb1d4327dc1a76b3b9 | 6,526 | py | Python | salt/modules/kmod.py | vamshi98/salt-formulas | 30edeadafd5d173efe4e1f767a8d562547ad128a | [
"Apache-2.0"
] | null | null | null | salt/modules/kmod.py | vamshi98/salt-formulas | 30edeadafd5d173efe4e1f767a8d562547ad128a | [
"Apache-2.0"
] | null | null | null | salt/modules/kmod.py | vamshi98/salt-formulas | 30edeadafd5d173efe4e1f767a8d562547ad128a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module to manage Linux kernel modules
'''
from __future__ import absolute_import
# Import python libs
import os
import re
# Import salt libs
import salt.utils
def __virtual__():
'''
Only runs on Linux systems
'''
return __grains__['kernel'] == 'Linux'
def _new_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return post - pre
def _rm_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return pre - post
def _get_modules_conf():
'''
Return location of modules config file.
Default: /etc/modules
'''
if 'systemd' in __grains__:
return '/etc/modules-load.d/salt_managed.conf'
return '/etc/modules'
def _strip_module_name(mod):
'''
Return module name and strip configuration. It is possible insert modules
in this format:
bonding mode=4 miimon=1000
This method return only 'bonding'
'''
if mod.strip() == '':
return False
return mod.split()[0]
def _set_persistent_module(mod):
'''
Add module to configuration file to make it persistent. If module is
commented uncomment it.
'''
conf = _get_modules_conf()
if not os.path.exists(conf):
__salt__['file.touch'](conf)
mod_name = _strip_module_name(mod)
if not mod_name or mod_name in mod_list(True) or mod_name \
not in available():
return set()
escape_mod = re.escape(mod)
# If module is commented only uncomment it
if __salt__['file.contains_regex_multiline'](conf,
'^#[\t ]*{0}[\t ]*$'.format(
escape_mod)):
__salt__['file.uncomment'](conf, escape_mod)
else:
__salt__['file.append'](conf, mod)
return set([mod_name])
def _remove_persistent_module(mod, comment):
'''
Remove module from configuration file. If comment is true only comment line
where module is.
'''
conf = _get_modules_conf()
mod_name = _strip_module_name(mod)
if not mod_name or mod_name not in mod_list(True):
return set()
escape_mod = re.escape(mod)
if comment:
__salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod))
else:
__salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '')
return set([mod_name])
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2])
for root, dirs, files in os.walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))
return sorted(list(ret))
def check_available(mod):
'''
Check to see if the specified kernel module is available
CLI Example:
.. code-block:: bash
salt '*' kmod.check_available kvm
'''
return mod in available()
def lsmod():
'''
Return a dict containing information about currently loaded modules
CLI Example:
.. code-block:: bash
salt '*' kmod.lsmod
'''
ret = []
for line in __salt__['cmd.run']('lsmod').splitlines():
comps = line.split()
if not len(comps) > 2:
continue
if comps[0] == 'Module':
continue
mdat = {
'size': comps[1],
'module': comps[0],
'depcount': comps[2],
}
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
mdat['deps'] = []
ret.append(mdat)
return ret
def mod_list(only_persist=False):
'''
Return a list of the loaded module names
CLI Example:
.. code-block:: bash
salt '*' kmod.mod_list
'''
mods = set()
if only_persist:
conf = _get_modules_conf()
if os.path.exists(conf):
with salt.utils.fopen(conf, 'r') as modules_file:
for line in modules_file:
line = line.strip()
mod_name = _strip_module_name(line)
if not line.startswith('#') and mod_name:
mods.add(mod_name)
else:
for mod in lsmod():
mods.add(mod['module'])
return sorted(list(mods))
def load(mod, persist=False):
'''
Load the specified kernel module
mod
Name of module to add
persist
Write module to /etc/modules to make it load on system reboot
CLI Example:
.. code-block:: bash
salt '*' kmod.load kvm
'''
pre_mods = lsmod()
response = __salt__['cmd.run_all']('modprobe {0}'.format(mod),
python_shell=False)
if response['retcode'] == 0:
post_mods = lsmod()
mods = _new_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _set_persistent_module(mod)
return sorted(list(mods | persist_mods))
else:
return 'Module {0} not found'.format(mod)
def is_loaded(mod):
'''
Check to see if the specified kernel module is loaded
CLI Example:
.. code-block:: bash
salt '*' kmod.is_loaded kvm
'''
return mod in mod_list()
def remove(mod, persist=False, comment=True):
'''
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /etc/modules
comment
If persist is set don't remove line from /etc/modules but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove kvm
'''
pre_mods = lsmod()
__salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False)
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment)
return sorted(list(mods | persist_mods))
| 24.081181 | 79 | 0.576923 |
from __future__ import absolute_import
import os
import re
import salt.utils
def __virtual__():
return __grains__['kernel'] == 'Linux'
def _new_mods(pre_mods, post_mods):
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return post - pre
def _rm_mods(pre_mods, post_mods):
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return pre - post
def _get_modules_conf():
if 'systemd' in __grains__:
return '/etc/modules-load.d/salt_managed.conf'
return '/etc/modules'
def _strip_module_name(mod):
if mod.strip() == '':
return False
return mod.split()[0]
def _set_persistent_module(mod):
conf = _get_modules_conf()
if not os.path.exists(conf):
__salt__['file.touch'](conf)
mod_name = _strip_module_name(mod)
if not mod_name or mod_name in mod_list(True) or mod_name \
not in available():
return set()
escape_mod = re.escape(mod)
if __salt__['file.contains_regex_multiline'](conf,
'^#[\t ]*{0}[\t ]*$'.format(
escape_mod)):
__salt__['file.uncomment'](conf, escape_mod)
else:
__salt__['file.append'](conf, mod)
return set([mod_name])
def _remove_persistent_module(mod, comment):
conf = _get_modules_conf()
mod_name = _strip_module_name(mod)
if not mod_name or mod_name not in mod_list(True):
return set()
escape_mod = re.escape(mod)
if comment:
__salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod))
else:
__salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '')
return set([mod_name])
def available():
ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2])
for root, dirs, files in os.walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))
return sorted(list(ret))
def check_available(mod):
return mod in available()
def lsmod():
ret = []
for line in __salt__['cmd.run']('lsmod').splitlines():
comps = line.split()
if not len(comps) > 2:
continue
if comps[0] == 'Module':
continue
mdat = {
'size': comps[1],
'module': comps[0],
'depcount': comps[2],
}
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
mdat['deps'] = []
ret.append(mdat)
return ret
def mod_list(only_persist=False):
mods = set()
if only_persist:
conf = _get_modules_conf()
if os.path.exists(conf):
with salt.utils.fopen(conf, 'r') as modules_file:
for line in modules_file:
line = line.strip()
mod_name = _strip_module_name(line)
if not line.startswith('#') and mod_name:
mods.add(mod_name)
else:
for mod in lsmod():
mods.add(mod['module'])
return sorted(list(mods))
def load(mod, persist=False):
pre_mods = lsmod()
response = __salt__['cmd.run_all']('modprobe {0}'.format(mod),
python_shell=False)
if response['retcode'] == 0:
post_mods = lsmod()
mods = _new_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _set_persistent_module(mod)
return sorted(list(mods | persist_mods))
else:
return 'Module {0} not found'.format(mod)
def is_loaded(mod):
return mod in mod_list()
def remove(mod, persist=False, comment=True):
pre_mods = lsmod()
__salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False)
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment)
return sorted(list(mods | persist_mods))
| true | true |
f7fc4f740c5d682298503e82253d52538deb549c | 2,505 | py | Python | flaskr/auth.py | bkoz/flask-tutorial | 8d0bda113e15cb712beb085c7da0a16c1ae03f24 | [
"Apache-2.0"
] | null | null | null | flaskr/auth.py | bkoz/flask-tutorial | 8d0bda113e15cb712beb085c7da0a16c1ae03f24 | [
"Apache-2.0"
] | null | null | null | flaskr/auth.py | bkoz/flask-tutorial | 8d0bda113e15cb712beb085c7da0a16c1ae03f24 | [
"Apache-2.0"
] | null | null | null | import functools
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from flaskr.db import get_db
bp = Blueprint('auth', __name__, url_prefix='/auth')
#
# register
#
@bp.route('/register', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif db.execute(
'SELECT id FROM user WHERE username = ?', (username,)
).fetchone() is not None:
error = f"User {username} is already registered."
if error is None:
db.execute(
'INSERT INTO user (username, password) VALUES (?, ?)',
(username, generate_password_hash(password))
)
db.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
#
# login
#
@bp.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
user = db.execute(
'SELECT * FROM user WHERE username = ?', (username,)
).fetchone()
if user is None:
error = 'Incorrect username.'
elif not check_password_hash(user['password'], password):
error = 'Incorrect password.'
if error is None:
session.clear()
session['user_id'] = user['id']
return redirect(url_for('index'))
flash(error)
return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM user WHERE id = ?', (user_id,)
).fetchone()
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
| 25.30303 | 77 | 0.583234 | import functools
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from flaskr.db import get_db
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/register', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif db.execute(
'SELECT id FROM user WHERE username = ?', (username,)
).fetchone() is not None:
error = f"User {username} is already registered."
if error is None:
db.execute(
'INSERT INTO user (username, password) VALUES (?, ?)',
(username, generate_password_hash(password))
)
db.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
@bp.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
user = db.execute(
'SELECT * FROM user WHERE username = ?', (username,)
).fetchone()
if user is None:
error = 'Incorrect username.'
elif not check_password_hash(user['password'], password):
error = 'Incorrect password.'
if error is None:
session.clear()
session['user_id'] = user['id']
return redirect(url_for('index'))
flash(error)
return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM user WHERE id = ?', (user_id,)
).fetchone()
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
| true | true |
f7fc4fb5e66d1021a5d3d8e4fa3489b9d352701f | 8,921 | py | Python | crossdock/server/server.py | sunset3000/jaeger-client-python | 6a10175d2fdb0c72fb804b9252ff4b40e3ebf104 | [
"Apache-2.0"
] | null | null | null | crossdock/server/server.py | sunset3000/jaeger-client-python | 6a10175d2fdb0c72fb804b9252ff4b40e3ebf104 | [
"Apache-2.0"
] | 1 | 2021-11-04T04:36:24.000Z | 2021-11-04T04:42:16.000Z | crossdock/server/server.py | sunset3000/jaeger-client-python | 6a10175d2fdb0c72fb804b9252ff4b40e3ebf104 | [
"Apache-2.0"
] | 4 | 2019-03-11T14:39:04.000Z | 2021-03-15T18:23:43.000Z | # Modified by SignalFx
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import tornado.web
import opentracing
import tornado.ioloop
import tornado.httpclient
from tornado.web import asynchronous
from jaeger_client import Tracer, ConstSampler
from jaeger_client.reporter import NullReporter
import crossdock.server.constants as constants
import crossdock.server.serializer as serializer
from crossdock.server.endtoend import EndToEndHandler
from opentracing.scope_managers.tornado import TornadoScopeManager
from opentracing_instrumentation import http_client, http_server, get_current_span, request_context
from opentracing_instrumentation.client_hooks import tornado_http
import opentracing.ext.tags as ext_tags
from crossdock.thrift_gen.tracetest.ttypes import ObservedSpan, TraceResponse, Transport, \
JoinTraceRequest
from tchannel import TChannel, thrift
from crossdock.server.thriftrw_serializer import trace_response_to_thriftrw, \
join_trace_request_to_thriftrw
DefaultClientPortHTTP = 8080
DefaultServerPortHTTP = 8081
DefaultServerPortTChannel = 8082
tracer = Tracer(
service_name='python',
reporter=NullReporter(),
sampler=ConstSampler(decision=True),
scope_manager=TornadoScopeManager)
opentracing.tracer = tracer
idl_path = 'idl/thrift/crossdock/tracetest.thrift'
thrift_services = {}
def get_thrift_service(service_name):
if service_name in thrift_services:
return thrift_services[service_name]
thrift_service = thrift.load(path=idl_path, service=service_name)
thrift_services[service_name] = thrift_service
return thrift_service
def serve():
"""main entry point"""
logging.getLogger().setLevel(logging.DEBUG)
logging.info('Python Tornado Crossdock Server Running ...')
server = Server(DefaultServerPortTChannel)
endtoend_handler = EndToEndHandler()
app = make_app(server, endtoend_handler)
app.listen(DefaultClientPortHTTP)
app.listen(DefaultServerPortHTTP)
server.tchannel.listen()
tornado.ioloop.IOLoop.current().start()
# Tornado Stuff
class MainHandler(tornado.web.RequestHandler):
def initialize(self, server=None, method=None):
self.server = server
self.method = method
@asynchronous
def get(self):
if self.server and self.method:
self.method(self.server, self.request, self)
else:
self.finish()
@asynchronous
def post(self):
if self.server and self.method:
self.method(self.server, self.request, self)
else:
self.finish()
def head(self):
pass
def make_app(server, endtoend_handler):
return tornado.web.Application(
[
(r'/', MainHandler),
(r'/start_trace', MainHandler, (dict(server=server, method=Server.start_trace))),
(r'/join_trace', MainHandler, (dict(server=server, method=Server.join_trace))),
(r'/create_traces', MainHandler, (dict(server=endtoend_handler,
method=EndToEndHandler.generate_traces))),
], debug=True, autoreload=False)
# HTTP Tracing Stuff
class UnknownTransportException(Exception):
pass
def get_observed_span(span):
return ObservedSpan(
traceId='%x' % span.trace_id,
sampled=span.is_sampled(),
baggage=span.get_baggage_item(constants.baggage_key)
)
class Server(object):
def __init__(self, port):
self.tracer = opentracing.tracer
self.tchannel = self.make_tchannel(port)
def make_tchannel(self, port):
tchannel = TChannel('python', hostport='localhost:%d' % port, trace=True)
service = get_thrift_service(service_name='python')
@tchannel.thrift.register(service.TracedService, method='joinTrace')
@tornado.gen.coroutine
def join_trace(request):
join_trace_request = request.body.request or None
logging.info('TChannel join_trace request: %s', join_trace_request)
response = yield self.prepare_response(get_current_span(),
join_trace_request.downstream)
logging.info('TChannel join_trace response: %s', response)
raise tornado.gen.Return(trace_response_to_thriftrw(service, response))
return tchannel
@tornado.gen.coroutine
def start_trace(self, request, response_writer):
start_trace_req = serializer.start_trace_request_from_json(request.body)
logging.info('HTTP start_trace request: %s', start_trace_req)
def update_span(span):
span.set_baggage_item(constants.baggage_key, start_trace_req.baggage)
span.set_tag(ext_tags.SAMPLING_PRIORITY, start_trace_req.sampled)
response = yield self.prepare_response(self.get_span(request, update_span),
start_trace_req.downstream)
logging.info('HTTP start_trace response: %s', response)
response_writer.write(serializer.traced_service_object_to_json(response))
response_writer.finish()
@tornado.gen.coroutine
def join_trace(self, request, response_writer):
join_trace_request = serializer.join_trace_request_from_json(request.body)
logging.info('HTTP join_trace request: %s', join_trace_request)
response = yield self.prepare_response(self.get_span(request, None),
join_trace_request.downstream)
logging.info('HTTP join_trace response: %s', response)
response_writer.write(serializer.traced_service_object_to_json(response))
response_writer.finish()
def get_span(self, http_request, update_span_func):
span = http_server.before_request(http_server.TornadoRequestWrapper(request=http_request),
self.tracer)
if update_span_func:
update_span_func(span)
return span
@tornado.gen.coroutine
def prepare_response(self, span, downstream):
observed_span = get_observed_span(span)
trace_response = TraceResponse(span=observed_span, notImplementedError='')
if downstream:
with request_context.span_in_stack_context(span):
future = self.call_downstream(span, downstream)
downstream_trace_resp = yield future
trace_response.downstream = downstream_trace_resp
raise tornado.gen.Return(trace_response)
@tornado.gen.coroutine
def call_downstream(self, span, downstream):
if downstream.transport == Transport.HTTP:
downstream_trace_resp = yield self.call_downstream_http(span, downstream)
elif downstream.transport == Transport.TCHANNEL:
downstream_trace_resp = yield self.call_downstream_tchannel(downstream)
else:
raise UnknownTransportException('%s' % downstream.transport)
raise tornado.gen.Return(downstream_trace_resp)
@tornado.gen.coroutine
def call_downstream_http(self, span, downstream):
url = 'http://%s:%s/join_trace' % (downstream.host, downstream.port)
body = serializer.join_trace_request_to_json(downstream.downstream, downstream.serverRole)
req = tornado.httpclient.HTTPRequest(url=url, method='POST',
headers={'Content-Type': 'application/json'},
body=body)
http_client.before_http_request(tornado_http.TornadoRequestWrapper(request=req),
lambda: span)
client = tornado.httpclient.AsyncHTTPClient()
http_result = yield client.fetch(req)
raise tornado.gen.Return(serializer.traceresponse_from_json(http_result.body))
@tornado.gen.coroutine
def call_downstream_tchannel(self, downstream):
downstream_service = get_thrift_service(downstream.serviceName)
jtr = JoinTraceRequest(downstream.serverRole, downstream.downstream)
jtr = join_trace_request_to_thriftrw(downstream_service, jtr)
hostport = '%s:%s' % (downstream.host, downstream.port)
thrift_result = yield self.tchannel.thrift(downstream_service.TracedService.joinTrace(jtr),
hostport=hostport)
raise tornado.gen.Return(thrift_result.body)
| 38.786957 | 99 | 0.696783 |
import logging
import tornado.web
import opentracing
import tornado.ioloop
import tornado.httpclient
from tornado.web import asynchronous
from jaeger_client import Tracer, ConstSampler
from jaeger_client.reporter import NullReporter
import crossdock.server.constants as constants
import crossdock.server.serializer as serializer
from crossdock.server.endtoend import EndToEndHandler
from opentracing.scope_managers.tornado import TornadoScopeManager
from opentracing_instrumentation import http_client, http_server, get_current_span, request_context
from opentracing_instrumentation.client_hooks import tornado_http
import opentracing.ext.tags as ext_tags
from crossdock.thrift_gen.tracetest.ttypes import ObservedSpan, TraceResponse, Transport, \
JoinTraceRequest
from tchannel import TChannel, thrift
from crossdock.server.thriftrw_serializer import trace_response_to_thriftrw, \
join_trace_request_to_thriftrw
DefaultClientPortHTTP = 8080
DefaultServerPortHTTP = 8081
DefaultServerPortTChannel = 8082
tracer = Tracer(
service_name='python',
reporter=NullReporter(),
sampler=ConstSampler(decision=True),
scope_manager=TornadoScopeManager)
opentracing.tracer = tracer
idl_path = 'idl/thrift/crossdock/tracetest.thrift'
thrift_services = {}
def get_thrift_service(service_name):
if service_name in thrift_services:
return thrift_services[service_name]
thrift_service = thrift.load(path=idl_path, service=service_name)
thrift_services[service_name] = thrift_service
return thrift_service
def serve():
logging.getLogger().setLevel(logging.DEBUG)
logging.info('Python Tornado Crossdock Server Running ...')
server = Server(DefaultServerPortTChannel)
endtoend_handler = EndToEndHandler()
app = make_app(server, endtoend_handler)
app.listen(DefaultClientPortHTTP)
app.listen(DefaultServerPortHTTP)
server.tchannel.listen()
tornado.ioloop.IOLoop.current().start()
class MainHandler(tornado.web.RequestHandler):
def initialize(self, server=None, method=None):
self.server = server
self.method = method
@asynchronous
def get(self):
if self.server and self.method:
self.method(self.server, self.request, self)
else:
self.finish()
@asynchronous
def post(self):
if self.server and self.method:
self.method(self.server, self.request, self)
else:
self.finish()
def head(self):
pass
def make_app(server, endtoend_handler):
return tornado.web.Application(
[
(r'/', MainHandler),
(r'/start_trace', MainHandler, (dict(server=server, method=Server.start_trace))),
(r'/join_trace', MainHandler, (dict(server=server, method=Server.join_trace))),
(r'/create_traces', MainHandler, (dict(server=endtoend_handler,
method=EndToEndHandler.generate_traces))),
], debug=True, autoreload=False)
class UnknownTransportException(Exception):
pass
def get_observed_span(span):
return ObservedSpan(
traceId='%x' % span.trace_id,
sampled=span.is_sampled(),
baggage=span.get_baggage_item(constants.baggage_key)
)
class Server(object):
def __init__(self, port):
self.tracer = opentracing.tracer
self.tchannel = self.make_tchannel(port)
def make_tchannel(self, port):
tchannel = TChannel('python', hostport='localhost:%d' % port, trace=True)
service = get_thrift_service(service_name='python')
@tchannel.thrift.register(service.TracedService, method='joinTrace')
@tornado.gen.coroutine
def join_trace(request):
join_trace_request = request.body.request or None
logging.info('TChannel join_trace request: %s', join_trace_request)
response = yield self.prepare_response(get_current_span(),
join_trace_request.downstream)
logging.info('TChannel join_trace response: %s', response)
raise tornado.gen.Return(trace_response_to_thriftrw(service, response))
return tchannel
@tornado.gen.coroutine
def start_trace(self, request, response_writer):
start_trace_req = serializer.start_trace_request_from_json(request.body)
logging.info('HTTP start_trace request: %s', start_trace_req)
def update_span(span):
span.set_baggage_item(constants.baggage_key, start_trace_req.baggage)
span.set_tag(ext_tags.SAMPLING_PRIORITY, start_trace_req.sampled)
response = yield self.prepare_response(self.get_span(request, update_span),
start_trace_req.downstream)
logging.info('HTTP start_trace response: %s', response)
response_writer.write(serializer.traced_service_object_to_json(response))
response_writer.finish()
@tornado.gen.coroutine
def join_trace(self, request, response_writer):
join_trace_request = serializer.join_trace_request_from_json(request.body)
logging.info('HTTP join_trace request: %s', join_trace_request)
response = yield self.prepare_response(self.get_span(request, None),
join_trace_request.downstream)
logging.info('HTTP join_trace response: %s', response)
response_writer.write(serializer.traced_service_object_to_json(response))
response_writer.finish()
def get_span(self, http_request, update_span_func):
span = http_server.before_request(http_server.TornadoRequestWrapper(request=http_request),
self.tracer)
if update_span_func:
update_span_func(span)
return span
@tornado.gen.coroutine
def prepare_response(self, span, downstream):
observed_span = get_observed_span(span)
trace_response = TraceResponse(span=observed_span, notImplementedError='')
if downstream:
with request_context.span_in_stack_context(span):
future = self.call_downstream(span, downstream)
downstream_trace_resp = yield future
trace_response.downstream = downstream_trace_resp
raise tornado.gen.Return(trace_response)
@tornado.gen.coroutine
def call_downstream(self, span, downstream):
if downstream.transport == Transport.HTTP:
downstream_trace_resp = yield self.call_downstream_http(span, downstream)
elif downstream.transport == Transport.TCHANNEL:
downstream_trace_resp = yield self.call_downstream_tchannel(downstream)
else:
raise UnknownTransportException('%s' % downstream.transport)
raise tornado.gen.Return(downstream_trace_resp)
@tornado.gen.coroutine
def call_downstream_http(self, span, downstream):
url = 'http://%s:%s/join_trace' % (downstream.host, downstream.port)
body = serializer.join_trace_request_to_json(downstream.downstream, downstream.serverRole)
req = tornado.httpclient.HTTPRequest(url=url, method='POST',
headers={'Content-Type': 'application/json'},
body=body)
http_client.before_http_request(tornado_http.TornadoRequestWrapper(request=req),
lambda: span)
client = tornado.httpclient.AsyncHTTPClient()
http_result = yield client.fetch(req)
raise tornado.gen.Return(serializer.traceresponse_from_json(http_result.body))
@tornado.gen.coroutine
def call_downstream_tchannel(self, downstream):
downstream_service = get_thrift_service(downstream.serviceName)
jtr = JoinTraceRequest(downstream.serverRole, downstream.downstream)
jtr = join_trace_request_to_thriftrw(downstream_service, jtr)
hostport = '%s:%s' % (downstream.host, downstream.port)
thrift_result = yield self.tchannel.thrift(downstream_service.TracedService.joinTrace(jtr),
hostport=hostport)
raise tornado.gen.Return(thrift_result.body)
| true | true |
f7fc5024f188039fda1d0c1a94d45b12bc8eefaa | 3,346 | py | Python | craftassist/agent/dialogue_objects/facing_helper.py | Dhiraj100892/droidlet | e4ea578672531524552b6ff021165fc9371b0ec8 | [
"MIT"
] | null | null | null | craftassist/agent/dialogue_objects/facing_helper.py | Dhiraj100892/droidlet | e4ea578672531524552b6ff021165fc9371b0ec8 | [
"MIT"
] | null | null | null | craftassist/agent/dialogue_objects/facing_helper.py | Dhiraj100892/droidlet | e4ea578672531524552b6ff021165fc9371b0ec8 | [
"MIT"
] | null | null | null | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
from base_agent.base_util import ErrorWithResponse
from base_agent.dialogue_objects import interpret_relative_direction
from word2number.w2n import word_to_num
def number_from_span(span):
# this will fail in many cases....
words = span.split()
degrees = None
for w in words:
try:
degrees = int(w)
except:
pass
if not degrees:
try:
degrees = word_to_num(span)
except:
pass
return degrees
class FacingInterpreter:
def __call__(self, interpreter, speaker, d):
# get these from memory, not player struct!!!!! FIXME!!!
current_pitch = interpreter.agent.get_player().look.pitch
current_yaw = interpreter.agent.get_player().look.yaw
if d.get("yaw_pitch"):
span = d["yaw_pitch"]
# for now assumed in (yaw, pitch) or yaw, pitch or yaw pitch formats
yp = span.replace("(", "").replace(")", "").split()
return {"head_yaw_pitch": (int(yp[0]), int(yp[1]))}
elif d.get("yaw"):
# for now assumed span is yaw as word or number
w = d["yaw"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (word_to_num(w), current_pitch)}
elif d.get("pitch"):
# for now assumed span is pitch as word or number
w = d["pitch"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (current_yaw, word_to_num(w))}
elif d.get("relative_yaw"):
# TODO in the task use turn angle
if d["relative_yaw"].get("angle"):
return {"relative_yaw": int(d["relative_yaw"]["angle"])}
elif d["relative_yaw"].get("yaw_span"):
span = d["relative_yaw"].get("yaw_span")
left = "left" in span or "leave" in span # lemmatizer :)
degrees = number_from_span(span) or 90
if degrees > 0 and left:
return {"relative_yaw": -degrees}
else:
return {"relative_yaw": degrees}
else:
pass
elif d.get("relative_pitch"):
if d["relative_pitch"].get("angle"):
# TODO in the task make this relative!
return {"relative_pitch": int(d["relative_pitch"]["angle"])}
elif d["relative_pitch"].get("pitch_span"):
span = d["relative_pitch"].get("pitch_span")
down = "down" in span
degrees = number_from_span(span) or 90
if degrees > 0 and down:
return {"relative_pitch": -degrees}
else:
return {"relative_pitch": degrees}
else:
pass
elif d.get("location"):
mems = interpreter.subinterpret["reference_locations"](
interpreter, speaker, d["location"]
)
steps, reldir = interpret_relative_direction(interpreter, d["location"])
loc, _ = interpreter.subinterpret["specify_locations"](
interpreter, speaker, mems, steps, reldir
)
return {"head_xyz": loc}
else:
raise ErrorWithResponse("I am not sure where you want me to turn")
| 39.833333 | 84 | 0.547818 |
from base_agent.base_util import ErrorWithResponse
from base_agent.dialogue_objects import interpret_relative_direction
from word2number.w2n import word_to_num
def number_from_span(span):
words = span.split()
degrees = None
for w in words:
try:
degrees = int(w)
except:
pass
if not degrees:
try:
degrees = word_to_num(span)
except:
pass
return degrees
class FacingInterpreter:
def __call__(self, interpreter, speaker, d):
current_pitch = interpreter.agent.get_player().look.pitch
current_yaw = interpreter.agent.get_player().look.yaw
if d.get("yaw_pitch"):
span = d["yaw_pitch"]
yp = span.replace("(", "").replace(")", "").split()
return {"head_yaw_pitch": (int(yp[0]), int(yp[1]))}
elif d.get("yaw"):
w = d["yaw"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (word_to_num(w), current_pitch)}
elif d.get("pitch"):
w = d["pitch"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (current_yaw, word_to_num(w))}
elif d.get("relative_yaw"):
if d["relative_yaw"].get("angle"):
return {"relative_yaw": int(d["relative_yaw"]["angle"])}
elif d["relative_yaw"].get("yaw_span"):
span = d["relative_yaw"].get("yaw_span")
left = "left" in span or "leave" in span
degrees = number_from_span(span) or 90
if degrees > 0 and left:
return {"relative_yaw": -degrees}
else:
return {"relative_yaw": degrees}
else:
pass
elif d.get("relative_pitch"):
if d["relative_pitch"].get("angle"):
return {"relative_pitch": int(d["relative_pitch"]["angle"])}
elif d["relative_pitch"].get("pitch_span"):
span = d["relative_pitch"].get("pitch_span")
down = "down" in span
degrees = number_from_span(span) or 90
if degrees > 0 and down:
return {"relative_pitch": -degrees}
else:
return {"relative_pitch": degrees}
else:
pass
elif d.get("location"):
mems = interpreter.subinterpret["reference_locations"](
interpreter, speaker, d["location"]
)
steps, reldir = interpret_relative_direction(interpreter, d["location"])
loc, _ = interpreter.subinterpret["specify_locations"](
interpreter, speaker, mems, steps, reldir
)
return {"head_xyz": loc}
else:
raise ErrorWithResponse("I am not sure where you want me to turn")
| true | true |
f7fc502f018ff0a4b901789ed78e73f55ab73b9a | 843 | py | Python | tests/__init__.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 1 | 2019-09-14T03:24:03.000Z | 2019-09-14T03:24:03.000Z | tests/__init__.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | tests/__init__.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import util
import tests.patch_inline_callbacks
# attempt to do the patch before we load any synapse code
tests.patch_inline_callbacks.do_patch()
util.DEFAULT_TIMEOUT_DURATION = 20
| 33.72 | 74 | 0.771056 |
from twisted.trial import util
import tests.patch_inline_callbacks
tests.patch_inline_callbacks.do_patch()
util.DEFAULT_TIMEOUT_DURATION = 20
| true | true |
f7fc514a52d7fd959e3b80d9c3b97a0d2c8a549c | 3,204 | py | Python | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy import MetaData, Table
from cinder.i18n import _LE, _LI
# Get default values via config. The defaults will either
# come from the default values set in the quota option
# configuration or via cinder.conf if the user has configured
# default values for quotas there.
CONF = cfg.CONF
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
LOG = logging.getLogger(__name__)
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def upgrade(migrate_engine):
"""Add default quota class data into DB."""
meta = MetaData()
meta.bind = migrate_engine
quota_classes = Table('quota_classes', meta, autoload=True)
rows = quota_classes.count().\
where(quota_classes.c.class_name == 'default').execute().scalar()
# Do not add entries if there are already 'default' entries. We don't
# want to write over something the user added.
if rows:
LOG.info(_LI("Found existing 'default' entries in the quota_classes "
"table. Skipping insertion of default values."))
return
try:
# Set default volumes
qci = quota_classes.insert()
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'volumes',
'hard_limit': CONF.quota_volumes,
'deleted': False, })
# Set default snapshots
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'snapshots',
'hard_limit': CONF.quota_snapshots,
'deleted': False, })
# Set default gigabytes
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
LOG.info(_LI("Added default quota class data into the DB."))
except Exception:
LOG.error(_LE("Default quota class data not inserted into the DB."))
raise
def downgrade(migrate_engine):
"""Don't delete the 'default' entries at downgrade time.
We don't know if the user had default entries when we started.
If they did, we wouldn't want to remove them. So, the safest
thing to do is just leave the 'default' entries at downgrade time.
"""
pass
| 36.409091 | 78 | 0.645443 |
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy import MetaData, Table
from cinder.i18n import _LE, _LI
CONF = cfg.CONF
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
LOG = logging.getLogger(__name__)
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
quota_classes = Table('quota_classes', meta, autoload=True)
rows = quota_classes.count().\
where(quota_classes.c.class_name == 'default').execute().scalar()
# want to write over something the user added.
if rows:
LOG.info(_LI("Found existing 'default' entries in the quota_classes "
"table. Skipping insertion of default values."))
return
try:
# Set default volumes
qci = quota_classes.insert()
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'volumes',
'hard_limit': CONF.quota_volumes,
'deleted': False, })
# Set default snapshots
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'snapshots',
'hard_limit': CONF.quota_snapshots,
'deleted': False, })
# Set default gigabytes
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
LOG.info(_LI("Added default quota class data into the DB."))
except Exception:
LOG.error(_LE("Default quota class data not inserted into the DB."))
raise
def downgrade(migrate_engine):
pass
| true | true |
f7fc520a7fa9cfbf5c91d46a60e8a8c682a4a51f | 612 | py | Python | fileuploads/migrations/0016_attachment.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 23 | 2016-03-24T00:31:47.000Z | 2022-02-10T21:27:53.000Z | fileuploads/migrations/0016_attachment.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 148 | 2016-04-03T00:22:55.000Z | 2020-08-01T20:08:03.000Z | fileuploads/migrations/0016_attachment.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 11 | 2016-04-24T03:31:31.000Z | 2019-09-03T16:51:08.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2016-06-08 07:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fileuploads', '0015_auto_20160606_0712'),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='attachments')),
],
),
]
| 26.608696 | 114 | 0.614379 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fileuploads', '0015_auto_20160606_0712'),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='attachments')),
],
),
]
| true | true |
f7fc522b7620c2b8cb95f33879398ac00dcbe666 | 377 | py | Python | excursions/migrations/0004_alter_excursion_date.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | 1 | 2017-04-06T09:12:45.000Z | 2017-04-06T09:12:45.000Z | excursions/migrations/0004_alter_excursion_date.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | 2 | 2017-09-07T22:09:50.000Z | 2020-06-09T14:46:30.000Z | excursions/migrations/0004_alter_excursion_date.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-09-18 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('excursions', '0003_auto_20210918_1341'),
]
operations = [
migrations.AlterField(
model_name='excursion',
name='date',
field=models.DateField(),
),
]
| 19.842105 | 50 | 0.591512 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('excursions', '0003_auto_20210918_1341'),
]
operations = [
migrations.AlterField(
model_name='excursion',
name='date',
field=models.DateField(),
),
]
| true | true |
f7fc527a89d72629d6026538921e96b1a060b7d6 | 6,380 | py | Python | frappe/desk/doctype/dashboard_chart/dashboard_chart.py | vigneshpp/globaas-dev-frappe | 2ecc45b4949adefa083574d46c8d5a23da76b92e | [
"MIT"
] | null | null | null | frappe/desk/doctype/dashboard_chart/dashboard_chart.py | vigneshpp/globaas-dev-frappe | 2ecc45b4949adefa083574d46c8d5a23da76b92e | [
"MIT"
] | 5 | 2020-12-04T21:18:22.000Z | 2022-03-12T00:43:08.000Z | frappe/desk/doctype/dashboard_chart/dashboard_chart.py | vigneshpp/globaas-dev-frappe | 2ecc45b4949adefa083574d46c8d5a23da76b92e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import datetime
from frappe.core.page.dashboard.dashboard import cache_source, get_from_date_from_timespan
from frappe.utils import nowdate, add_to_date, getdate, get_last_day, formatdate
from frappe.model.document import Document
@frappe.whitelist()
@cache_source
def get(chart_name = None, chart = None, no_cache = None, from_date = None, to_date = None, refresh = None):
	"""Resolve the chart definition and dispatch to the matching config builder.

	Accepts either the name of a saved ``Dashboard Chart`` document or an
	inline chart definition (JSON string / dict) in ``chart``. Returns a
	frappe-charts compatible config dict.
	"""
	# Prefer the persisted document when a name is supplied; otherwise
	# treat ``chart`` as an ad-hoc definition coming from the client.
	chart = (frappe.get_doc('Dashboard Chart', chart_name) if chart_name
		else frappe._dict(frappe.parse_json(chart)))

	timespan = chart.timespan

	# An explicit date range stored on the chart overrides whatever the
	# caller passed in.
	if timespan == 'Select Date Range':
		from_date, to_date = chart.from_date, chart.to_date

	timegrain = chart.time_interval
	filters = frappe.parse_json(chart.filters_json)

	# don't include cancelled documents
	filters['docstatus'] = ('<', 2)

	if chart.chart_type == 'Group By':
		return get_group_by_chart_config(chart, filters)
	return get_chart_config(chart, filters, timespan, timegrain, from_date, to_date)
def get_chart_config(chart, filters, timespan, timegrain, from_date, to_date):
	"""Build a time-series chart config.

	Aggregates rows of ``chart.document_type`` bucketed on the date field
	``chart.based_on``, between ``from_date`` and ``to_date`` (both derived
	from ``timespan`` when missing), then fills gaps via ``get_result``.
	"""
	if not from_date:
		from_date = get_from_date_from_timespan(to_date, timespan)
	if not to_date:
		to_date = datetime.datetime.now()

	# get conditions from filters
	conditions, values = frappe.db.build_conditions(filters)

	datefield = chart.based_on
	aggregate_function = get_aggregate_function(chart.chart_type)
	# COUNT-style charts aggregate over the constant 1 instead of a field.
	value_field = chart.value_based_on or '1'

	# query will return the date bucket (_unit) and its aggregate value
	data = frappe.db.sql(f'''
		select
			{datefield} as _unit,
			{aggregate_function}({value_field})
		from `tab{chart.document_type}`
		where
			{conditions}
			and {datefield} BETWEEN '{from_date.strftime('%Y-%m-%d')}' and '{to_date}'
		group by _unit
		order by _unit asc
	''', values)

	# add missing data points for periods where there was no result
	result = get_result(data, timegrain, from_date, to_date)

	labels = []
	datapoints = []
	for row in result:
		labels.append(formatdate(row[0].strftime('%Y-%m-%d')))
		datapoints.append(row[1])

	return {
		"labels": labels,
		"datasets": [{
			"name": chart.name,
			"values": datapoints
		}]
	}
def get_group_by_chart_config(chart, filters):
	"""Build chart data by grouping ``chart.document_type`` rows.

	Rows are grouped by ``chart.group_by_based_on`` and aggregated with
	the function implied by ``chart.group_by_type``. When
	``chart.number_of_groups`` is set, all groups beyond the first
	``number_of_groups - 1`` are folded into a single 'Other' bucket.

	Returns a dict with ``labels`` and ``datasets`` for the chart, or
	``None`` when the query yields no rows.
	"""
	conditions, values = frappe.db.build_conditions(filters)
	# NOTE: the previous version also passed an unused ``field=`` kwarg to
	# str.format(); the template has no ``{field}`` placeholder, so it has
	# been removed.
	data = frappe.db.sql('''
		select
			{aggregate_function}({value_field}) as count,
			{group_by_field} as name
		from `tab{doctype}`
		where {conditions}
		group by {group_by_field}
		order by count desc
	'''.format(
		aggregate_function = get_aggregate_function(chart.group_by_type),
		value_field = chart.aggregate_function_based_on or '1',
		group_by_field = chart.group_by_based_on,
		doctype = chart.document_type,
		conditions = conditions,
	), values, as_dict = True)
	if data:
		# fold the tail groups into a single 'Other' bucket
		if chart.number_of_groups and chart.number_of_groups < len(data):
			other_count = 0
			for i in range(chart.number_of_groups - 1, len(data)):
				other_count += data[i]['count']
			data = data[0: chart.number_of_groups - 1]
			data.append({'name': 'Other', 'count': other_count})
		chart_config = {
			"labels": [item['name'] if item['name'] else 'Not Specified' for item in data],
			"datasets": [{
				"name": chart.name,
				"values": [item['count'] for item in data]
			}]
		}
		return chart_config
	else:
		return None
def get_aggregate_function(chart_type):
	"""Map a chart aggregation type to the corresponding SQL function name.

	Raises ``KeyError`` for any type other than Sum, Count or Average.
	"""
	sql_function_by_type = {
		"Sum": "SUM",
		"Count": "COUNT",
		"Average": "AVG",
	}
	return sql_function_by_type[chart_type]
def get_result(data, timegrain, from_date, to_date):
	"""Expand sparse query rows into one ``[period_end, value]`` pair per period.

	``data`` is a sequence of (date, value) rows sorted ascending by date.
	Each row's value is summed into the first period whose ending date is
	on or after the row's date; periods with no rows keep 0.0.
	"""
	start_date = getdate(from_date)
	end_date = getdate(to_date)
	# seed with the range start, then one point per period ending
	result = [[start_date, 0.0]]
	while start_date < end_date:
		next_date = get_next_expected_date(start_date, timegrain)
		result.append([next_date, 0.0])
		start_date = next_date
	data_index = 0
	if data:
		for i, d in enumerate(result):
			# accumulate every remaining row dated up to this period ending
			while data_index < len(data) and getdate(data[data_index][0]) <= d[0]:
				d[1] += data[data_index][1]
				data_index += 1
	return result
def get_next_expected_date(date, timegrain):
	"""Return the ending date of the period immediately after *date*.

	*date* is assumed to be a period ending date, so the next period
	starts one day later and is snapped to its own ending.
	"""
	# (removed a dead ``next_date = None`` assignment that was immediately
	# overwritten)
	next_date = get_period_ending(add_to_date(date, days=1), timegrain)
	return getdate(next_date)
def get_period_ending(date, timegrain):
	"""Snap *date* to the last day of its Daily/Weekly/Monthly/Quarterly period."""
	date = getdate(date)
	ending_of = {
		'Weekly': get_week_ending,
		'Monthly': get_month_ending,
		'Quarterly': get_quarter_ending,
	}
	handler = ending_of.get(timegrain)
	if handler is not None:
		date = handler(date)
	# 'Daily' (or any unrecognised timegrain) leaves the date unchanged
	return getdate(date)
def get_week_ending(date):
	"""Return the Sunday that ends the week containing *date*.

	Weeks start on Monday, so the week ending is ``6 - weekday()`` days
	ahead of the given date.
	"""
	from datetime import timedelta
	days_until_sunday = 6 - date.weekday()
	return date + timedelta(days=days_until_sunday)
def get_month_ending(date):
	"""Return the last day of the month containing *date*."""
	month_of_the_year = int(date.strftime('%m'))
	# first day of next month (note month starts from 1)
	date = add_to_date('{}-01-01'.format(date.year), months = month_of_the_year)
	# last day of this month
	return add_to_date(date, days = -1)
def get_quarter_ending(date):
	"""Return the last day of the calendar quarter containing *date*."""
	date = getdate(date)
	# find the earliest quarter ending date that is after
	# the given date
	for month in (3, 6, 9, 12):
		quarter_end_month = getdate('{}-{}-01'.format(date.year, month))
		quarter_end_date = getdate(get_last_day(quarter_end_month))
		if date <= quarter_end_date:
			date = quarter_end_date
			break
	return date
class DashboardChart(Document):
	"""Controller for the Dashboard Chart doctype."""
	def on_update(self):
		# invalidate the cached chart data whenever the chart definition changes
		frappe.cache().delete_key('chart-data:{}'.format(self.name))
	def validate(self):
		# 'Custom' charts supply their own source, so no field checks apply
		if self.chart_type != 'Custom':
			self.check_required_field()
	def check_required_field(self):
		"""Throw if any field needed to render this chart type is missing."""
		if not self.document_type:
			frappe.throw(_("Document type is required to create a dashboard chart"))
		if self.chart_type == 'Group By':
			if not self.group_by_based_on:
				frappe.throw(_("Group By field is required to create a dashboard chart"))
			if self.group_by_type in ['Sum', 'Average'] and not self.aggregate_function_based_on:
				frappe.throw(_("Aggregate Function field is required to create a dashboard chart"))
		else:
			# time-series charts need a date field to group on
			if not self.based_on:
				frappe.throw(_("Time series based on is required to create a dashboard chart"))
| 28.609865 | 108 | 0.726646 |
from __future__ import unicode_literals
import frappe
from frappe import _
import datetime
from frappe.core.page.dashboard.dashboard import cache_source, get_from_date_from_timespan
from frappe.utils import nowdate, add_to_date, getdate, get_last_day, formatdate
from frappe.model.document import Document
@frappe.whitelist()
@cache_source
def get(chart_name = None, chart = None, no_cache = None, from_date = None, to_date = None, refresh = None):
if chart_name:
chart = frappe.get_doc('Dashboard Chart', chart_name)
else:
chart = frappe._dict(frappe.parse_json(chart))
timespan = chart.timespan
if chart.timespan == 'Select Date Range':
from_date = chart.from_date
to_date = chart.to_date
timegrain = chart.time_interval
filters = frappe.parse_json(chart.filters_json)
filters['docstatus'] = ('<', 2)
if chart.chart_type == 'Group By':
chart_config = get_group_by_chart_config(chart, filters)
else:
chart_config = get_chart_config(chart, filters, timespan, timegrain, from_date, to_date)
return chart_config
def get_chart_config(chart, filters, timespan, timegrain, from_date, to_date):
if not from_date:
from_date = get_from_date_from_timespan(to_date, timespan)
if not to_date:
to_date = datetime.datetime.now()
# get conditions from filters
conditions, values = frappe.db.build_conditions(filters)
# query will return year, unit and aggregate value
data = frappe.db.sql('''
select
{unit} as _unit,
{aggregate_function}({value_field})
from `tab{doctype}`
where
{conditions}
and {datefield} BETWEEN '{from_date}' and '{to_date}'
group by _unit
order by _unit asc
'''.format(
unit = chart.based_on,
datefield = chart.based_on,
aggregate_function = get_aggregate_function(chart.chart_type),
value_field = chart.value_based_on or '1',
doctype = chart.document_type,
conditions = conditions,
from_date = from_date.strftime('%Y-%m-%d'),
to_date = to_date
), values)
# add missing data points for periods where there was no result
result = get_result(data, timegrain, from_date, to_date)
chart_config = {
"labels": [formatdate(r[0].strftime('%Y-%m-%d')) for r in result],
"datasets": [{
"name": chart.name,
"values": [r[1] for r in result]
}]
}
return chart_config
def get_group_by_chart_config(chart, filters):
conditions, values = frappe.db.build_conditions(filters)
data = frappe.db.sql('''
select
{aggregate_function}({value_field}) as count,
{group_by_field} as name
from `tab{doctype}`
where {conditions}
group by {group_by_field}
order by count desc
'''.format(
aggregate_function = get_aggregate_function(chart.group_by_type),
value_field = chart.aggregate_function_based_on or '1',
field = chart.aggregate_function_based_on or chart.group_by_based_on,
group_by_field = chart.group_by_based_on,
doctype = chart.document_type,
conditions = conditions,
), values, as_dict = True)
if data:
if chart.number_of_groups and chart.number_of_groups < len(data):
other_count = 0
for i in range(chart.number_of_groups - 1, len(data)):
other_count += data[i]['count']
data = data[0: chart.number_of_groups - 1]
data.append({'name': 'Other', 'count': other_count})
chart_config = {
"labels": [item['name'] if item['name'] else 'Not Specified' for item in data],
"datasets": [{
"name": chart.name,
"values": [item['count'] for item in data]
}]
}
return chart_config
else:
return None
def get_aggregate_function(chart_type):
return {
"Sum": "SUM",
"Count": "COUNT",
"Average": "AVG",
}[chart_type]
def get_result(data, timegrain, from_date, to_date):
start_date = getdate(from_date)
end_date = getdate(to_date)
result = [[start_date, 0.0]]
while start_date < end_date:
next_date = get_next_expected_date(start_date, timegrain)
result.append([next_date, 0.0])
start_date = next_date
data_index = 0
if data:
for i, d in enumerate(result):
while data_index < len(data) and getdate(data[data_index][0]) <= d[0]:
d[1] += data[data_index][1]
data_index += 1
return result
def get_next_expected_date(date, timegrain):
next_date = None
# given date is always assumed to be the period ending date
next_date = get_period_ending(add_to_date(date, days=1), timegrain)
return getdate(next_date)
def get_period_ending(date, timegrain):
date = getdate(date)
if timegrain=='Daily':
pass
elif timegrain=='Weekly':
date = get_week_ending(date)
elif timegrain=='Monthly':
date = get_month_ending(date)
elif timegrain=='Quarterly':
date = get_quarter_ending(date)
return getdate(date)
def get_week_ending(date):
# week starts on monday
from datetime import timedelta
start = date - timedelta(days = date.weekday())
end = start + timedelta(days=6)
return end
def get_month_ending(date):
month_of_the_year = int(date.strftime('%m'))
# first day of next month (note month starts from 1)
date = add_to_date('{}-01-01'.format(date.year), months = month_of_the_year)
# last day of this month
return add_to_date(date, days = -1)
def get_quarter_ending(date):
date = getdate(date)
# find the earliest quarter ending date that is after
# the given date
for month in (3, 6, 9, 12):
quarter_end_month = getdate('{}-{}-01'.format(date.year, month))
quarter_end_date = getdate(get_last_day(quarter_end_month))
if date <= quarter_end_date:
date = quarter_end_date
break
return date
class DashboardChart(Document):
def on_update(self):
frappe.cache().delete_key('chart-data:{}'.format(self.name))
def validate(self):
if self.chart_type != 'Custom':
self.check_required_field()
def check_required_field(self):
if not self.document_type:
frappe.throw(_("Document type is required to create a dashboard chart"))
if self.chart_type == 'Group By':
if not self.group_by_based_on:
frappe.throw(_("Group By field is required to create a dashboard chart"))
if self.group_by_type in ['Sum', 'Average'] and not self.aggregate_function_based_on:
frappe.throw(_("Aggregate Function field is required to create a dashboard chart"))
else:
if not self.based_on:
frappe.throw(_("Time series based on is required to create a dashboard chart"))
| true | true |
f7fc52c835209cfbcd7e5ec513d91b7f010974ac | 4,859 | py | Python | ephypype/power.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 18 | 2018-04-18T12:14:52.000Z | 2022-02-25T19:31:44.000Z | ephypype/power.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 106 | 2017-12-09T13:34:30.000Z | 2022-03-12T01:02:17.000Z | ephypype/power.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 13 | 2017-05-28T20:38:56.000Z | 2022-03-06T15:58:02.000Z | """Power functions."""
# Author: Dmitrii Altukhov <dm-altukhov@ya.ru>
# Annalisa Pascarella <a.pascarella@iac.cnr.it>
import os
import numpy as np
from nipype.utils.filemanip import split_filename
from mne import read_epochs
from mne.io import read_raw_fif
from scipy.signal import welch
from .fif2array import _get_raw_array
def _compute_and_save_psd(data_fname, fmin=0, fmax=120,
                          method='welch', is_epoched=False,
                          n_fft=256, n_overlap=0,
                          picks=None, proj=False, n_jobs=1, verbose=None):
    """Load epochs/raw from file, compute psd and save the result.

    Parameters
    ----------
    data_fname : str
        Path to a .fif file holding epochs (``is_epoched=True``) or raw data.
    fmin, fmax : float
        Frequency band limits passed to the PSD estimator.
    method : str
        PSD estimator: 'welch' or 'multitaper'.
    is_epoched : bool
        Whether ``data_fname`` contains epoched data.

    NOTE(review): ``n_fft``, ``n_overlap``, ``picks``, ``proj``, ``n_jobs``
    and ``verbose`` are currently not forwarded to the estimators.

    Returns
    -------
    psds_fname : str
        Path of the saved .npz file containing 'psds' and 'freqs'.
    """
    if is_epoched:
        epochs = read_epochs(data_fname)
    else:
        epochs = read_raw_fif(data_fname, preload=True)
    # restrict the PSD computation to MEG channels
    epochs_meg = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)
    if method == 'welch':
        from mne.time_frequency import psd_welch
        psds, freqs = psd_welch(epochs_meg, fmin=fmin, fmax=fmax)
    elif method == 'multitaper':
        from mne.time_frequency import psd_multitaper
        psds, freqs = psd_multitaper(epochs_meg, fmin=fmin, fmax=fmax)
    else:
        raise Exception('nonexistent method for psd computation')
    _get_raw_array(data_fname, save_data=False)
    psds_fname = _save_psd(data_fname, psds, freqs)
    _save_psd_img(data_fname, psds, freqs, is_epoched, method)
    return psds_fname
def _compute_and_save_src_psd(data_fname, sfreq, fmin=0, fmax=120,
                              is_epoched=False,
                              n_fft=256, n_overlap=0,
                              n_jobs=1, verbose=None):
    """Load source time series from a .npy file, compute PSD and save it.

    Parameters
    ----------
    data_fname : str
        Path to a .npy file of shape (n_sources, n_times) or
        (1, n_sources, n_times).
    sfreq : float
        Sampling frequency of the time series.
    n_fft : int
        Desired Welch segment length (capped at the number of samples).
    n_overlap : int
        Number of overlapping samples between segments.

    Returns
    -------
    psds_fname : str
        Path of the saved .npz file containing 'psds' and 'freqs'.
    """
    src_data = np.load(data_fname)
    dim = src_data.shape
    # drop a leading singleton dimension, e.g. (1, n_sources, n_times)
    if len(dim) == 3 and dim[0] == 1:
        src_data = np.squeeze(src_data)
    print(('src data dim: {}'.format(src_data.shape)))
    # the segment length cannot exceed the number of time samples
    nperseg = min(n_fft, src_data.shape[1])
    # scipy's welch vectorizes over the leading axis: one PSD per source
    # time series in a single call, replacing the previous per-row loop.
    freqs, psds = welch(src_data, fs=sfreq, window='hamming',
                        nperseg=nperseg, noverlap=n_overlap, nfft=None,
                        axis=-1)
    psds_fname = _save_psd(data_fname, psds, freqs)
    _save_psd_img(data_fname, psds, freqs, is_epoched)
    return psds_fname
def _compute_mean_band_psd(psds_file, freq_bands):
    """Average the PSD over each requested frequency band.

    Parameters
    ----------
    psds_file : str
        Path to a .npz file with 'psds' (n_channels_or_voxels x n_freqs)
        and 'freqs' arrays.
    freq_bands : list of (fmin, fmax) pairs
        Inclusive frequency band limits to average over.

    Returns
    -------
    psds_mean_fname : str
        Path of the saved .npy file (n_channels_or_voxels x n_bands).
    """
    npzfile = np.load(psds_file)
    print(('the .npz file contain {} \n'.format(npzfile.files)))
    # is a matrix with dim n_channels(n_voxel) x n_freqs
    psds = npzfile['psds']
    print(('psds is a matrix {} \n'.format(psds.shape)))
    # list of frequencies in which psds was computed;
    # its length = columns of psds
    freqs = npzfile['freqs']
    print(('freqs contains {} frequencies \n'.format(len(freqs))))
    n_row, _ = psds.shape
    n_fr = len(freq_bands)
    m_px = np.empty([n_row, n_fr])
    for f in range(n_fr):
        min_fr = freq_bands[f][0]
        max_fr = freq_bands[f][1]
        print(('*** frequency band [{}, {}] ***\n'.format(min_fr, max_fr)))
        # boolean mask product keeps columns with min_fr <= freq <= max_fr
        m_px[:, f] = np.mean(psds[:, (freqs >= min_fr) * (freqs <= max_fr)], 1)
    psds_mean_fname = _save_m_px(psds_file, m_px)
    return psds_mean_fname
def _save_m_px(psds_file, m_px):
    """Save the mean band power matrix as '<basename>-mean_band.npy'.

    The file is written into the current working directory and its
    absolute path is returned.
    """
    _, basename, _ = split_filename(psds_file)
    out_fname = os.path.abspath(basename + '-mean_band.npy')
    print((m_px.shape))
    np.save(out_fname, m_px)
    return out_fname
def _save_psd(data_fname, psds, freqs):
    """Save psds and freqs as '<basename>-psds.npz'.

    The file is written into the current working directory and its
    absolute path is returned.
    """
    _, basename, _ = split_filename(data_fname)
    psds_fname = os.path.abspath(basename + '-psds.npz')
    print((psds.shape))
    print(('*** save {} ***'.format(psds_fname)))
    np.savez(psds_fname, psds=psds, freqs=freqs)
    return psds_fname
def _save_psd_img(data_fname, psds, freqs, is_epoched=False, method=''):
    """Plot the mean PSD (in dB) with a +/- one-std band and save as PNG.

    The image is written as '<basename>-psds.png' into the current
    working directory.
    """
    import matplotlib.pyplot as plt
    data_path, basename, ext = split_filename(data_fname)
    psds_img_fname = basename + '-psds.png'
    psds_img_fname = os.path.abspath(psds_img_fname)
    # save PSD as img
    f, ax = plt.subplots()
    # convert power to decibels
    psds = 10 * np.log10(psds)
    if is_epoched:
        # average over epochs first, then over channels
        psds_mean = psds.mean(0).mean(0)
        psds_std = psds.mean(0).std(0)
    else:
        psds_mean = psds.mean(0)
        psds_std = psds.std(0)
    ax.plot(freqs, psds_mean, color='g')
    ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
                    color='g', alpha=.5)
    ax.set(title='{} PSD'.format(method), xlabel='Frequency',
           ylabel='Power Spectral Density (dB)')
    print(('*** save {} ***'.format(psds_img_fname)))
    plt.savefig(psds_img_fname)
| 31.75817 | 79 | 0.63902 |
import os
import numpy as np
from nipype.utils.filemanip import split_filename
from mne import read_epochs
from mne.io import read_raw_fif
from scipy.signal import welch
from .fif2array import _get_raw_array
def _compute_and_save_psd(data_fname, fmin=0, fmax=120,
method='welch', is_epoched=False,
n_fft=256, n_overlap=0,
picks=None, proj=False, n_jobs=1, verbose=None):
if is_epoched:
epochs = read_epochs(data_fname)
else:
epochs = read_raw_fif(data_fname, preload=True)
epochs_meg = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)
if method == 'welch':
from mne.time_frequency import psd_welch
psds, freqs = psd_welch(epochs_meg, fmin=fmin, fmax=fmax)
elif method == 'multitaper':
from mne.time_frequency import psd_multitaper
psds, freqs = psd_multitaper(epochs_meg, fmin=fmin, fmax=fmax)
else:
raise Exception('nonexistent method for psd computation')
_get_raw_array(data_fname, save_data=False)
psds_fname = _save_psd(data_fname, psds, freqs)
_save_psd_img(data_fname, psds, freqs, is_epoched, method)
return psds_fname
def _compute_and_save_src_psd(data_fname, sfreq, fmin=0, fmax=120,
is_epoched=False,
n_fft=256, n_overlap=0,
n_jobs=1, verbose=None):
src_data = np.load(data_fname)
dim = src_data.shape
if len(dim) == 3 and dim[0] == 1:
src_data = np.squeeze(src_data)
print(('src data dim: {}'.format(src_data.shape)))
if n_fft > src_data.shape[1]:
nperseg = src_data.shape[1]
else:
nperseg = n_fft
n_freqs = nperseg // 2 + 1
psds = np.empty([src_data.shape[0], n_freqs])
for i in range(src_data.shape[0]):
freqs, Pxx = welch(src_data[i, :], fs=sfreq, window='hamming',
nperseg=nperseg, noverlap=n_overlap, nfft=None)
psds[i, :] = Pxx
psds_fname = _save_psd(data_fname, psds, freqs)
_save_psd_img(data_fname, psds, freqs, is_epoched)
return psds_fname
def _compute_mean_band_psd(psds_file, freq_bands):
npzfile = np.load(psds_file)
print(('the .npz file contain {} \n'.format(npzfile.files)))
psds = npzfile['psds']
print(('psds is a matrix {} \n'.format(psds.shape)))
freqs = npzfile['freqs']
print(('freqs contains {} frequencies \n'.format(len(freqs))))
n_row, _ = psds.shape
n_fr = len(freq_bands)
m_px = np.empty([n_row, n_fr])
for f in range(n_fr):
min_fr = freq_bands[f][0]
max_fr = freq_bands[f][1]
print(('*** frequency band [{}, {}] ***\n'.format(min_fr, max_fr)))
m_px[:, f] = np.mean(psds[:, (freqs >= min_fr) * (freqs <= max_fr)], 1)
psds_mean_fname = _save_m_px(psds_file, m_px)
return psds_mean_fname
def _save_m_px(psds_file, m_px):
data_path, basename, ext = split_filename(psds_file)
psds_mean_fname = basename + '-mean_band.npy'
psds_mean_fname = os.path.abspath(psds_mean_fname)
print((m_px.shape))
np.save(psds_mean_fname, m_px)
return psds_mean_fname
def _save_psd(data_fname, psds, freqs):
data_path, basename, ext = split_filename(data_fname)
psds_fname = basename + '-psds.npz'
psds_fname = os.path.abspath(psds_fname)
print((psds.shape))
print(('*** save {} ***'.format(psds_fname)))
np.savez(psds_fname, psds=psds, freqs=freqs)
return psds_fname
def _save_psd_img(data_fname, psds, freqs, is_epoched=False, method=''):
import matplotlib.pyplot as plt
data_path, basename, ext = split_filename(data_fname)
psds_img_fname = basename + '-psds.png'
psds_img_fname = os.path.abspath(psds_img_fname)
f, ax = plt.subplots()
psds = 10 * np.log10(psds)
if is_epoched:
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
else:
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='g')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='g', alpha=.5)
ax.set(title='{} PSD'.format(method), xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
print(('*** save {} ***'.format(psds_img_fname)))
plt.savefig(psds_img_fname)
| true | true |
f7fc5376a52ebd7b7f95d9e276104aba47fd2418 | 3,136 | py | Python | lib/society/test.py | Sciteb-code/CODiT | f70bf57e6d4710066e2c9ef8f2203f04245715d1 | [
"MIT"
] | null | null | null | lib/society/test.py | Sciteb-code/CODiT | f70bf57e6d4710066e2c9ef8f2203f04245715d1 | [
"MIT"
] | null | null | null | lib/society/test.py | Sciteb-code/CODiT | f70bf57e6d4710066e2c9ef8f2203f04245715d1 | [
"MIT"
] | null | null | null | import logging
from collections import defaultdict
class Test:
    """A single swab test of a person.

    The swab is taken once ``days_elapsed`` reaches ``days_delayed_start``;
    the result becomes actionable once ``days_elapsed`` reaches
    ``days_to_complete``. A snapshot of the person's contact statistics is
    recorded at booking time.
    """

    def __init__(self, person, notes, time_to_complete, days_delayed_start=0):
        self.days_elapsed = 0
        self.person = person
        self.positive = None  # unknown until the swab is taken
        self.days_to_complete = time_to_complete + days_delayed_start
        self.notes = notes
        self.days_delayed_start = days_delayed_start
        # snapshot of the person's uninfected first- and second-order contacts
        uninfected_contacts = [c for c in person.contacts if not c.infected]
        self._succeptible_contacts = len(uninfected_contacts)
        second_order = [s for c in uninfected_contacts
                        for s in c.contacts if not s.infected]
        self._succeptible_contacts_of_contacts = len(second_order)
        if person.infected:
            self._days_infected = person.days_infected()
        else:
            self._days_infected = None
        self._isolating = person.isolating
        self.swab_taken = False

    def update_time(self, timedelta):
        """Advance the test clock by ``timedelta`` days, swabbing when due."""
        if self.time_to_swab(timedelta):
            assert not self.swab_taken
            self.swab()
        self.days_elapsed += timedelta

    def swab(self):
        """Take the swab; the result reflects whether the person is infectious now."""
        self.positive = self.person.infectious
        self.swab_taken = True

    def time_to_swab(self, timedelta):
        """True if the swab falls within the next ``timedelta`` days."""
        start_reached = self.days_elapsed >= self.days_delayed_start
        start_within_step = self.days_delayed_start + timedelta > self.days_elapsed
        return start_within_step and start_reached
class TestQueue:
    """A queue of swab tests, indexed both globally and per person."""

    def __init__(self):
        self._taken_and_planned = []
        self.completed_tests = []
        self._tests_of = defaultdict(list)

    @property
    def tests(self):
        """
        :return: for past reasons, this attribute only returns tests whose swabs have been taken
        """
        return (test for test in self._taken_and_planned if test.swab_taken)

    def remove_test(self, test):
        """Drop ``test`` from the queue and from its person's test list."""
        self._taken_and_planned.remove(test)
        self._tests_of[test.person].remove(test)

    def add_test(self, person, notes, time_to_complete, front_of_queue=False, days_delayed_start=0):
        """Queue a new test for ``person``.

        If a planned or in-process test with the same ``notes`` already
        exists for this person, do nothing.
        """
        if any(existing.notes == notes for existing in self._tests_of[person]):
            return
        test = Test(person, notes, time_to_complete, days_delayed_start=days_delayed_start)
        position = 0 if front_of_queue else len(self._taken_and_planned)
        self._taken_and_planned.insert(position, test)
        self._tests_of[person].append(test)

    def tests_of(self, person):
        """All tests of ``person`` whose swab has been taken."""
        return [test for test in self._tests_of[person] if test.swab_taken]

    def contains_planned_test_of(self, person):
        """Tests of ``person`` still awaiting their swab (truthy if any exist)."""
        return [test for test in self._tests_of[person] if not test.swab_taken]

    def pick_actionable_tests(self, max_processed, logging_overrun=None):
        """Return up to ``max_processed`` swabbed tests whose processing time elapsed.

        Tests beyond the capacity are skipped for this round; if
        ``logging_overrun`` is given it is logged once per skipped test.
        """
        actionable = []
        for position, test in enumerate(list(self.tests)):
            if max_processed is not None and position >= max_processed:
                if logging_overrun:
                    logging.info(logging_overrun)
                continue
            if test.days_elapsed >= test.days_to_complete:
                actionable.append(test)
        return actionable

    def update_tests(self, time_delta):
        """Advance the clock of every planned or taken test."""
        for test in self._taken_and_planned:
            test.update_time(time_delta)
| 35.636364 | 116 | 0.656888 | import logging
from collections import defaultdict
class Test:
def __init__(self, person, notes, time_to_complete, days_delayed_start=0):
self.days_elapsed = 0
self.person = person
self.positive = None
self.days_to_complete = time_to_complete + days_delayed_start
self.notes = notes
self.days_delayed_start = days_delayed_start
targets = [q for q in person.contacts if not q.infected]
self._succeptible_contacts = len(targets)
self._succeptible_contacts_of_contacts = len([s for v in targets for s in v.contacts if not s.infected])
self._days_infected = person.days_infected() if person.infected else None
self._isolating = person.isolating
self.swab_taken = False
def update_time(self, timedelta):
if self.time_to_swab(timedelta):
assert not self.swab_taken
self.swab()
self.days_elapsed += timedelta
def swab(self):
self.positive = self.person.infectious
self.swab_taken = True
def time_to_swab(self, timedelta):
return self.days_delayed_start + timedelta > self.days_elapsed >= self.days_delayed_start
class TestQueue:
def __init__(self):
self._taken_and_planned = []
self.completed_tests = []
self._tests_of = defaultdict(list)
@property
def tests(self):
return (t for t in self._taken_and_planned if t.swab_taken)
def remove_test(self, test):
self._taken_and_planned.remove(test)
self._tests_of[test.person].remove(test)
def add_test(self, person, notes, time_to_complete, front_of_queue=False, days_delayed_start=0):
if notes in [t.notes for t in self._tests_of[person]]:
# do nothing ...
return
test = Test(person, notes, time_to_complete, days_delayed_start=days_delayed_start)
if front_of_queue:
self._taken_and_planned.insert(0, test)
else:
self._taken_and_planned.append(test)
self._tests_of[person].append(test)
def tests_of(self, person):
return [t for t in self._tests_of[person] if t.swab_taken]
def contains_planned_test_of(self, person):
return [t for t in self._tests_of[person] if not t.swab_taken]
def pick_actionable_tests(self, max_processed, logging_overrun=None):
actionable_tests = []
tests = [t for t in self.tests]
for i, t in enumerate(tests):
if max_processed is not None and i >= max_processed:
if logging_overrun:
logging.info(logging_overrun)
continue
if t.days_elapsed >= t.days_to_complete:
actionable_tests.append(t)
return actionable_tests
def update_tests(self, time_delta):
for t in self._taken_and_planned:
t.update_time(time_delta)
| true | true |
f7fc5386c69541a6b6eb6011d702e49b1549aa6a | 2,166 | py | Python | tools/trainval.py | carpedkm/vedatad | 55f8dced57f698ee9fc0da9bcf471d171e718d0c | [
"Apache-2.0"
] | null | null | null | tools/trainval.py | carpedkm/vedatad | 55f8dced57f698ee9fc0da9bcf471d171e718d0c | [
"Apache-2.0"
] | null | null | null | tools/trainval.py | carpedkm/vedatad | 55f8dced57f698ee9fc0da9bcf471d171e718d0c | [
"Apache-2.0"
] | null | null | null | import argparse
import os.path as osp
import shutil
import time
from vedacore.misc import Config, mkdir_or_exist, set_random_seed
from vedacore.parallel import init_dist
from vedatad.assembler import trainval # calls for the first time in the code
from vedatad.misc import get_root_logger
def parse_args():
    """Parse the command-line arguments for a training/validation run."""
    parser = argparse.ArgumentParser(description='Train a detector')
    # positional argument: path to the training configuration file
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--workdir', help='the dir to save logs and models')
    parser.add_argument('--launcher', choices=['none', 'pytorch'],
                        default='none', help='job launcher')
    # rank injected by distributed launchers such as torch.distributed.launch
    parser.add_argument('--local_rank', type=int, default=0)
    return parser.parse_args()
def main():
    """Entry point: load the config, prepare workdir/logging and run trainval."""
    args = parse_args()
    cfg = Config.fromfile(args.config)  # loads the configuration file as dictionary type
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # workdir is determined in this priority: CLI > segment in file > filename
    if args.workdir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.workdir = args.workdir
    elif cfg.get('workdir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.workdir = osp.join('./workdir',
                               osp.splitext(osp.basename(args.config))[0])
    seed = cfg.get('seed', None)
    deterministic = cfg.get('deterministic', False)
    set_random_seed(seed, deterministic)
    # create work_dir and keep a copy of the config used for this run
    mkdir_or_exist(osp.abspath(cfg.workdir))
    shutil.copy(args.config, cfg.workdir)
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    print(cfg.workdir)
    log_file = osp.join(cfg.workdir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    trainval(cfg, distributed, logger)
if __name__ == '__main__':
    main()
| 32.328358 | 126 | 0.685134 | import argparse
import os.path as osp
import shutil
import time
from vedacore.misc import Config, mkdir_or_exist, set_random_seed
from vedacore.parallel import init_dist
from vedatad.assembler import trainval
from vedatad.misc import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--workdir', help='the dir to save logs and models')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
if args.workdir is not None:
cfg.workdir = args.workdir
elif cfg.get('workdir', None) is None:
cfg.workdir = osp.join('./workdir',
osp.splitext(osp.basename(args.config))[0])
seed = cfg.get('seed', None)
deterministic = cfg.get('deterministic', False)
set_random_seed(seed, deterministic)
mkdir_or_exist(osp.abspath(cfg.workdir))
shutil.copy(args.config, cfg.workdir)
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
print(cfg.workdir)
log_file = osp.join(cfg.workdir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
trainval(cfg, distributed, logger)
if __name__ == '__main__':
main()
| true | true |
f7fc541ab96dd0d95df2f48a893452765495b1ff | 1,428 | py | Python | examples/peripherals/timer_group/example_test.py | mishafarms/esp-idf | b886dc699880e6f068d5abc0433deb60a6466fee | [
"Apache-2.0"
] | 5 | 2021-11-22T06:47:54.000Z | 2022-01-04T06:58:43.000Z | examples/peripherals/timer_group/example_test.py | mishafarms/esp-idf | b886dc699880e6f068d5abc0433deb60a6466fee | [
"Apache-2.0"
] | null | null | null | examples/peripherals/timer_group/example_test.py | mishafarms/esp-idf | b886dc699880e6f068d5abc0433deb60a6466fee | [
"Apache-2.0"
] | 1 | 2021-03-01T11:52:31.000Z | 2021-03-01T11:52:31.000Z | #!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2019-2021 Espressif Systems (Shanghai) CO LTD
#
# SPDX-License-Identifier: CC0-1.0
#
from __future__ import unicode_literals
import re
from typing import Any
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32s2', 'esp32c3'])
def test_examples_timergroup(env, extra_data):  # type: (Any, Any) -> None
    """Flash the timer_group example and verify its console output on the DUT."""
    dut = env.get_dut('timer_group', 'examples/peripherals/timer_group')
    dut.start_app()
    # check auto reload function
    with_auto_reload = dut.expect(re.compile(r'Timer Group (\S+) auto reload'), timeout=30)[0]
    assert with_auto_reload == 'with'
    select_groups = dut.expect(re.compile(r'Group\[(\d)\], timer\[(\d)\] alarm event'))
    timer_group_num = int(select_groups[0])
    timer_instance_num = int(select_groups[1])
    # the example fires its first alarm on group 0, timer 0
    assert timer_group_num == 0 and timer_instance_num == 0
    dut.expect('EVENT TIME')
    counter_value = dut.expect(re.compile(r'Counter:\s+(0x\d+)'))[0]
    counter_value = int(counter_value, 16)
    print('counter value at auto reload event: ', counter_value)
    # auto reload should have reset the counter to (near) zero
    assert counter_value < 20
    # check timer interval
    dut.expect('Timer Group without auto reload', timeout=5)
    dut.expect('EVENT TIME')
    event_time0 = dut.expect(re.compile(r'Time\s+:\s+(\d+\.\d+)\s+s'))[0]
    print('event0={}'.format(event_time0))
if __name__ == '__main__':
    test_examples_timergroup()
| 33.209302 | 94 | 0.69958 |
from __future__ import unicode_literals
import re
from typing import Any
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32s2', 'esp32c3'])
def test_examples_timergroup(env, extra_data):
dut = env.get_dut('timer_group', 'examples/peripherals/timer_group')
dut.start_app()
with_auto_reload = dut.expect(re.compile(r'Timer Group (\S+) auto reload'), timeout=30)[0]
assert with_auto_reload == 'with'
select_groups = dut.expect(re.compile(r'Group\[(\d)\], timer\[(\d)\] alarm event'))
timer_group_num = int(select_groups[0])
timer_instance_num = int(select_groups[1])
assert timer_group_num == 0 and timer_instance_num == 0
dut.expect('EVENT TIME')
counter_value = dut.expect(re.compile(r'Counter:\s+(0x\d+)'))[0]
counter_value = int(counter_value, 16)
print('counter value at auto reload event: ', counter_value)
assert counter_value < 20
dut.expect('Timer Group without auto reload', timeout=5)
dut.expect('EVENT TIME')
event_time0 = dut.expect(re.compile(r'Time\s+:\s+(\d+\.\d+)\s+s'))[0]
print('event0={}'.format(event_time0))
if __name__ == '__main__':
test_examples_timergroup()
| true | true |
f7fc54d91b35df22264277bb1c9469b1d61e960e | 98,018 | py | Python | top2vec/Top2Vec.py | taghizad3h/Top2Vec | 0237989ecd6a28df184b6a2b245239c501676da2 | [
"BSD-3-Clause"
] | null | null | null | top2vec/Top2Vec.py | taghizad3h/Top2Vec | 0237989ecd6a28df184b6a2b245239c501676da2 | [
"BSD-3-Clause"
] | null | null | null | top2vec/Top2Vec.py | taghizad3h/Top2Vec | 0237989ecd6a28df184b6a2b245239c501676da2 | [
"BSD-3-Clause"
] | null | null | null | # Author: Dimo Angelov
#
# License: BSD 3 clause
import logging
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import strip_tags
import umap
import hdbscan
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.cluster import dbscan
import tempfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.special import softmax
try:
import hnswlib
_HAVE_HNSWLIB = True
except ImportError:
_HAVE_HNSWLIB = False
try:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
_HAVE_TENSORFLOW = True
except ImportError:
_HAVE_TENSORFLOW = False
try:
from sentence_transformers import SentenceTransformer
_HAVE_TORCH = True
except ImportError:
_HAVE_TORCH = False
# Module-level logger; per-instance verbosity is toggled by the ``verbose``
# constructor argument (DEBUG when True, WARNING otherwise).
logger = logging.getLogger('top2vec')
logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)


def default_tokenizer(doc):
    """Tokenize documents for training and remove too long/short words"""
    # strip_tags removes HTML markup; simple_preprocess lowercases and
    # tokenizes, dropping very short/long tokens; deacc strips accents.
    return simple_preprocess(strip_tags(doc), deacc=True)
class Top2Vec:
"""
Top2Vec
Creates jointly embedded topic, document and word vectors.
Parameters
----------
embedding_model: string
This will determine which model is used to generate the document and
word embeddings. The valid string options are:
* doc2vec
* universal-sentence-encoder
* universal-sentence-encoder-multilingual
* distiluse-base-multilingual-cased
For large data sets and data sets with very unique vocabulary doc2vec
could produce better results. This will train a doc2vec model from
scratch. This method is language agnostic. However multiple languages
will not be aligned.
Using the universal sentence encoder options will be much faster since
those are pre-trained and efficient models. The universal sentence
encoder options are suggested for smaller data sets. They are also
good options for large data sets that are in English or in languages
covered by the multilingual model. It is also suggested for data sets
that are multilingual.
For more information on universal-sentence-encoder visit:
https://tfhub.dev/google/universal-sentence-encoder/4
For more information on universal-sentence-encoder-multilingual visit:
https://tfhub.dev/google/universal-sentence-encoder-multilingual/3
The distiluse-base-multilingual-cased pre-trained sentence transformer
is suggested for multilingual datasets and languages that are not
covered by the multilingual universal sentence encoder. The
transformer is significantly slower than the universal sentence
encoder options.
    For more information on distiluse-base-multilingual-cased visit:
https://www.sbert.net/docs/pretrained_models.html
embedding_model_path: string (Optional)
Pre-trained embedding models will be downloaded automatically by
default. However they can also be uploaded from a file that is in the
location of embedding_model_path.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
documents: List of str
Input corpus, should be a list of strings.
min_count: int (Optional, default 50)
Ignores all words with total frequency lower than this. For smaller
corpora a smaller min_count will be necessary.
speed: string (Optional, default 'learn')
This parameter is only used when using doc2vec as embedding_model.
It will determine how fast the model takes to train. The
fast-learn option is the fastest and will generate the lowest quality
vectors. The learn option will learn better quality vectors but take
a longer time to train. The deep-learn option will learn the best
quality vectors but will take significant time to train. The valid
string speed options are:
* fast-learn
* learn
* deep-learn
use_corpus_file: bool (Optional, default False)
This parameter is only used when using doc2vec as embedding_model.
Setting use_corpus_file to True can sometimes provide speedup for
large datasets when multiple worker threads are available. Documents
are still passed to the model as a list of str, the model will create
a temporary corpus file for training.
document_ids: List of str, int (Optional)
A unique value per document that will be used for referring to
documents in search results. If ids are not given to the model, the
index of each document in the original corpus will become the id.
keep_documents: bool (Optional, default True)
If set to False documents will only be used for training and not saved
as part of the model. This will reduce model size. When using search
functions only document ids will be returned, not the actual
documents.
workers: int (Optional)
The amount of worker threads to be used in training the model. Larger
amount will lead to faster training.
tokenizer: callable (Optional, default None)
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
use_embedding_model_tokenizer: bool (Optional, default False)
If using an embedding model other than doc2vec, use the model's
tokenizer for document embedding. If set to True the tokenizer, either
default or passed callable will be used to tokenize the text to
extract the vocabulary for word embedding.
umap_args: dict (Optional, default None)
Pass custom arguments to UMAP.
hdbscan_args: dict (Optional, default None)
Pass custom arguments to HDBSCAN.
verbose: bool (Optional, default True)
Whether to print status data during training.
"""
    def __init__(self,
                 documents,
                 min_count=50,
                 embedding_model='doc2vec',
                 embedding_model_path=None,
                 speed='learn',
                 use_corpus_file=False,
                 document_ids=None,
                 keep_documents=True,
                 workers=None,
                 tokenizer=None,
                 use_embedding_model_tokenizer=False,
                 umap_args=None,
                 hdbscan_args=None,
                 verbose=True
                 ):
        """Train the model.

        Pipeline: embed documents and words (doc2vec or a pre-trained
        encoder), reduce document vectors to 5 dimensions with UMAP,
        find dense clusters with HDBSCAN, and derive one topic vector
        per cluster. See the class docstring for parameter details.
        """

        if verbose:
            logger.setLevel(logging.DEBUG)
            self.verbose = True
        else:
            logger.setLevel(logging.WARNING)
            self.verbose = False

        if tokenizer is None:
            tokenizer = default_tokenizer

        # validate documents
        if not (isinstance(documents, list) or isinstance(documents, np.ndarray)):
            raise ValueError("Documents need to be a list of strings")
        if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
            raise ValueError("Documents need to be a list of strings")
        if keep_documents:
            self.documents = np.array(documents, dtype="object")
        else:
            self.documents = None

        # validate document ids
        if document_ids is not None:
            if not (isinstance(document_ids, list) or isinstance(document_ids, np.ndarray)):
                raise ValueError("Documents ids need to be a list of str or int")

            if len(documents) != len(document_ids):
                raise ValueError("Document ids need to match number of documents")
            elif len(document_ids) != len(set(document_ids)):
                raise ValueError("Document ids need to be unique")

            if all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
                self.doc_id_type = np.str_
            elif all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
                self.doc_id_type = np.int_
            else:
                raise ValueError("Document ids need to be str or int")

            self.document_ids_provided = True
            self.document_ids = np.array(document_ids)
            self.doc_id2index = dict(zip(document_ids, list(range(0, len(document_ids)))))
        else:
            # default ids are the document's position in the input corpus
            self.document_ids_provided = False
            self.document_ids = np.array(range(0, len(documents)))
            self.doc_id2index = dict(zip(self.document_ids, list(range(0, len(self.document_ids)))))
            self.doc_id_type = np.int_

        acceptable_embedding_models = ["universal-sentence-encoder-multilingual",
                                       "universal-sentence-encoder",
                                       "distiluse-base-multilingual-cased"]

        self.embedding_model_path = embedding_model_path

        if embedding_model == 'doc2vec':

            # validate training inputs
            if speed == "fast-learn":
                hs = 0
                negative = 5
                epochs = 40
            elif speed == "learn":
                hs = 1
                negative = 0
                epochs = 40
            elif speed == "deep-learn":
                hs = 1
                negative = 0
                epochs = 400
            elif speed == "test-learn":
                # undocumented single-epoch setting (useful for tests)
                hs = 0
                negative = 5
                epochs = 1
            else:
                raise ValueError("speed parameter needs to be one of: fast-learn, learn or deep-learn")

            if workers is None:
                pass
            elif isinstance(workers, int):
                pass
            else:
                raise ValueError("workers needs to be an int")

            doc2vec_args = {"vector_size": 300,
                            "min_count": min_count,
                            "window": 15,
                            "sample": 1e-5,
                            "negative": negative,
                            "hs": hs,
                            "epochs": epochs,
                            "dm": 0,
                            "dbow_words": 1}

            if workers is not None:
                doc2vec_args["workers"] = workers

            logger.info('Pre-processing documents for training')

            if use_corpus_file:
                # write tokenized documents to a temp corpus file, which
                # gensim can consume with multiple worker threads
                processed = [' '.join(tokenizer(doc)) for doc in documents]
                lines = "\n".join(processed)
                temp = tempfile.NamedTemporaryFile(mode='w+t')
                temp.write(lines)
                doc2vec_args["corpus_file"] = temp.name

            else:
                train_corpus = [TaggedDocument(tokenizer(doc), [i]) for i, doc in enumerate(documents)]
                doc2vec_args["documents"] = train_corpus

            logger.info('Creating joint document/word embedding')
            self.embedding_model = 'doc2vec'
            self.model = Doc2Vec(**doc2vec_args)

            if use_corpus_file:
                temp.close()

        elif embedding_model in acceptable_embedding_models:
            self.embed = None
            self.embedding_model = embedding_model
            self._check_import_status()

            logger.info('Pre-processing documents for training')

            # preprocess documents
            tokenized_corpus = [tokenizer(doc) for doc in documents]

            def return_doc(doc):
                # identity hook: corpus is already tokenized, so disable
                # CountVectorizer's own tokenization/preprocessing
                return doc

            # preprocess vocabulary
            vectorizer = CountVectorizer(tokenizer=return_doc, preprocessor=return_doc)
            doc_word_counts = vectorizer.fit_transform(tokenized_corpus)
            words = vectorizer.get_feature_names()
            word_counts = np.array(np.sum(doc_word_counts, axis=0).tolist()[0])
            # NOTE: strictly greater-than, so words occurring exactly
            # min_count times are dropped
            vocab_inds = np.where(word_counts > min_count)[0]

            if len(vocab_inds) == 0:
                raise ValueError(f"A min_count of {min_count} results in "
                                 f"all words being ignored, choose a lower value.")
            self.vocab = [words[ind] for ind in vocab_inds]

            self._check_model_status()

            logger.info('Creating joint document/word embedding')

            # embed words
            self.word_indexes = dict(zip(self.vocab, range(len(self.vocab))))
            self.word_vectors = self._l2_normalize(np.array(self.embed(self.vocab)))

            # embed documents
            if use_embedding_model_tokenizer:
                self.document_vectors = self._embed_documents(documents)
            else:
                train_corpus = [' '.join(tokens) for tokens in tokenized_corpus]
                self.document_vectors = self._embed_documents(train_corpus)

        else:
            raise ValueError(f"{embedding_model} is an invalid embedding model.")

        # create 5D embeddings of documents
        logger.info('Creating lower dimension embedding of documents')
        if umap_args is None:
            umap_args = {'n_neighbors': 15,
                         'n_components': 5,
                         'metric': 'cosine'}

        umap_model = umap.UMAP(**umap_args).fit(self._get_document_vectors(norm=False))

        # find dense areas of document vectors
        logger.info('Finding dense areas of documents')
        if hdbscan_args is None:
            hdbscan_args = {'min_cluster_size': 15,
                            'metric': 'euclidean',
                            'cluster_selection_method': 'eom'}

        cluster = hdbscan.HDBSCAN(**hdbscan_args).fit(umap_model.embedding_)

        # calculate topic vectors from dense areas of documents
        logger.info('Finding topics')

        # create topic vectors
        self._create_topic_vectors(cluster.labels_)

        # deduplicate topics
        self._deduplicate_topics()

        # find topic words and scores
        self.topic_words, self.topic_word_scores = self._find_topic_words_and_scores(topic_vectors=self.topic_vectors)

        # assign documents to topic
        self.doc_top, self.doc_dist = self._calculate_documents_topic(self.topic_vectors,
                                                                      self._get_document_vectors())

        # calculate topic sizes
        self.topic_sizes = self._calculate_topic_sizes(hierarchy=False)

        # re-order topics
        self._reorder_topics(hierarchy=False)

        # initialize variables for hierarchical topic reduction
        self.topic_vectors_reduced = None
        self.doc_top_reduced = None
        self.doc_dist_reduced = None
        self.topic_sizes_reduced = None
        self.topic_words_reduced = None
        self.topic_word_scores_reduced = None
        self.hierarchy = None

        # initialize document indexing variables
        self.document_index = None
        self.serialized_document_index = None
        self.documents_indexed = False
        self.index_id2doc_id = None
        self.doc_id2index_id = None

        # initialize word indexing variables
        self.word_index = None
        self.serialized_word_index = None
        self.words_indexed = False
    def save(self, file):
        """
        Saves the current model to the specified file.

        Parameters
        ----------
        file: str
            File where model will be saved.
        """

        document_index_temp = None
        word_index_temp = None

        # do not save sentence encoders and sentence transformers
        if self.embedding_model != "doc2vec":
            self.embed = None

        # serialize document index so that it can be saved; the live
        # hnswlib index object is stashed aside before dump() and
        # restored afterwards so this instance keeps working post-save
        if self.documents_indexed:
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            self.document_index.save_index(temp.name)
            self.serialized_document_index = temp.read()
            temp.close()
            document_index_temp = self.document_index
            self.document_index = None

        # serialize word index so that it can be saved (same stash/restore
        # dance as for the document index)
        if self.words_indexed:
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            self.word_index.save_index(temp.name)
            self.serialized_word_index = temp.read()
            temp.close()
            word_index_temp = self.word_index
            self.word_index = None

        # joblib dump of the whole (pruned) instance
        dump(self, file)

        self.document_index = document_index_temp
        self.word_index = word_index_temp
    @classmethod
    def load(cls, file):
        """
        Load a pre-trained model from the specified file.

        Parameters
        ----------
        file: str
            File where model will be loaded from.
        """
        # 'load' here resolves to joblib.load (module-level import),
        # not this classmethod
        top2vec_model = load(file)

        # load document index: rebuild the hnswlib index from the bytes
        # serialized by save(), via a temporary file
        if top2vec_model.documents_indexed:
            if not _HAVE_HNSWLIB:
                raise ImportError(f"Cannot load document index.\n\n"
                                  "Try: pip install top2vec[indexing]\n\n"
                                  "Alternatively try: pip install hnswlib")

            temp = tempfile.NamedTemporaryFile(mode='w+b')
            temp.write(top2vec_model.serialized_document_index)

            if top2vec_model.embedding_model == 'doc2vec':
                document_vectors = top2vec_model.model.docvecs.vectors_docs
            else:
                document_vectors = top2vec_model.document_vectors

            top2vec_model.document_index = hnswlib.Index(space='ip',
                                                         dim=document_vectors.shape[1])
            top2vec_model.document_index.load_index(temp.name, max_elements=document_vectors.shape[0])
            temp.close()
            top2vec_model.serialized_document_index = None

        # load word index (same reconstruction as the document index)
        if top2vec_model.words_indexed:
            if not _HAVE_HNSWLIB:
                raise ImportError(f"Cannot load word index.\n\n"
                                  "Try: pip install top2vec[indexing]\n\n"
                                  "Alternatively try: pip install hnswlib")

            temp = tempfile.NamedTemporaryFile(mode='w+b')
            temp.write(top2vec_model.serialized_word_index)

            if top2vec_model.embedding_model == 'doc2vec':
                word_vectors = top2vec_model.model.wv.vectors
            else:
                word_vectors = top2vec_model.word_vectors

            top2vec_model.word_index = hnswlib.Index(space='ip',
                                                     dim=word_vectors.shape[1])
            top2vec_model.word_index.load_index(temp.name, max_elements=word_vectors.shape[0])
            temp.close()
            top2vec_model.serialized_word_index = None

        return top2vec_model
    @staticmethod
    def _l2_normalize(vectors):
        # Unit-normalize a single vector or a 2-D batch of vectors.
        if vectors.ndim == 2:
            return normalize(vectors)
        else:
            return normalize(vectors.reshape(1, -1))[0]

    def _embed_documents(self, train_corpus):
        # Embed documents with the pre-trained encoder in batches of 500
        # (bounds memory use); returns L2-normalized vectors.
        self._check_import_status()
        self._check_model_status()

        # embed documents
        batch_size = 500
        document_vectors = []

        current = 0
        batches = int(len(train_corpus) / batch_size)
        extra = len(train_corpus) % batch_size

        for ind in range(0, batches):
            document_vectors.append(self.embed(train_corpus[current:current + batch_size]))
            current += batch_size

        if extra > 0:
            document_vectors.append(self.embed(train_corpus[current:current + extra]))

        document_vectors = self._l2_normalize(np.array(np.vstack(document_vectors)))

        return document_vectors

    def _embed_query(self, query):
        # Embed a single query string and L2-normalize the result.
        self._check_import_status()
        self._check_model_status()

        return self._l2_normalize(np.array(self.embed([query])[0]))

    def _set_document_vectors(self, document_vectors):
        # doc2vec keeps vectors inside the gensim model; other embedding
        # backends store them directly on the Top2Vec instance.
        if self.embedding_model == 'doc2vec':
            self.model.docvecs.vectors_docs = document_vectors
        else:
            self.document_vectors = document_vectors

    def _get_document_vectors(self, norm=True):
        # Return document vectors; for doc2vec, init_sims() caches the
        # normalized copies on the gensim model before returning them.
        if self.embedding_model == 'doc2vec':
            if norm:
                self.model.docvecs.init_sims()
                return self.model.docvecs.vectors_docs_norm
            else:
                return self.model.docvecs.vectors_docs
        else:
            return self.document_vectors

    def _index2word(self, index):
        # Word-vector row index -> word string, for either backend.
        if self.embedding_model == 'doc2vec':
            return self.model.wv.index2word[index]
        else:
            return self.vocab[index]

    def _get_word_vectors(self):
        # Return word vectors; the non-doc2vec vectors were already
        # L2-normalized at construction time.
        if self.embedding_model == 'doc2vec':
            self.model.wv.init_sims()
            return self.model.wv.vectors_norm
        else:
            return self.word_vectors
    def _create_topic_vectors(self, cluster_labels):
        # A topic vector is the L2-normalized mean of the document
        # vectors in one HDBSCAN cluster; noise label -1 is excluded.
        unique_labels = set(cluster_labels)
        if -1 in unique_labels:
            unique_labels.remove(-1)
        self.topic_vectors = self._l2_normalize(
            np.vstack([self._get_document_vectors(norm=False)[np.where(cluster_labels == label)[0]]
                      .mean(axis=0) for label in unique_labels]))

    def _deduplicate_topics(self):
        # Merge near-identical topic vectors (cosine distance <= 0.1)
        # by clustering the topic vectors themselves with DBSCAN and
        # replacing each duplicate cluster by its normalized mean.
        core_samples, labels = dbscan(X=self.topic_vectors,
                                      eps=0.1,
                                      min_samples=2,
                                      metric="cosine")

        duplicate_clusters = set(labels)

        if len(duplicate_clusters) > 1 or -1 not in duplicate_clusters:

            # unique topics
            unique_topics = self.topic_vectors[np.where(labels == -1)[0]]

            if -1 in duplicate_clusters:
                duplicate_clusters.remove(-1)

            # merge duplicate topics
            for unique_label in duplicate_clusters:
                unique_topics = np.vstack(
                    [unique_topics, self._l2_normalize(self.topic_vectors[np.where(labels == unique_label)[0]]
                                                       .mean(axis=0))])

            self.topic_vectors = unique_topics

    def _calculate_topic_sizes(self, hierarchy=False):
        # Topic size = number of documents assigned to that topic;
        # value_counts() returns sizes sorted descending.
        if hierarchy:
            topic_sizes = pd.Series(self.doc_top_reduced).value_counts()
        else:
            topic_sizes = pd.Series(self.doc_top).value_counts()

        return topic_sizes

    def _reorder_topics(self, hierarchy=False):
        # Renumber topics by descending size so topic 0 is the largest,
        # keeping vectors, words, scores, document assignments and (for
        # the reduced case) the hierarchy consistent with the new order.
        if hierarchy:
            self.topic_vectors_reduced = self.topic_vectors_reduced[self.topic_sizes_reduced.index]
            self.topic_words_reduced = self.topic_words_reduced[self.topic_sizes_reduced.index]
            self.topic_word_scores_reduced = self.topic_word_scores_reduced[self.topic_sizes_reduced.index]
            old2new = dict(zip(self.topic_sizes_reduced.index, range(self.topic_sizes_reduced.index.shape[0])))
            self.doc_top_reduced = np.array([old2new[i] for i in self.doc_top_reduced])
            self.hierarchy = [self.hierarchy[i] for i in self.topic_sizes_reduced.index]
            self.topic_sizes_reduced.reset_index(drop=True, inplace=True)
        else:
            self.topic_vectors = self.topic_vectors[self.topic_sizes.index]
            self.topic_words = self.topic_words[self.topic_sizes.index]
            self.topic_word_scores = self.topic_word_scores[self.topic_sizes.index]
            old2new = dict(zip(self.topic_sizes.index, range(self.topic_sizes.index.shape[0])))
            self.doc_top = np.array([old2new[i] for i in self.doc_top])
            self.topic_sizes.reset_index(drop=True, inplace=True)
    @staticmethod
    def _calculate_documents_topic(topic_vectors, document_vectors, dist=True, num_topics=None):
        # For every document vector find its closest topic vector(s) by
        # inner product. Documents are processed in batches of 10000 to
        # bound the size of the intermediate similarity matrix.
        #
        # Returns doc_top (best topic index per document) and, when
        # dist=True, doc_dist (the matching similarity). When num_topics
        # is given, the top num_topics topics per document are returned
        # instead of only the single best one.
        batch_size = 10000
        doc_top = []
        if dist:
            doc_dist = []

        if document_vectors.shape[0] > batch_size:
            current = 0
            batches = int(document_vectors.shape[0] / batch_size)
            extra = document_vectors.shape[0] % batch_size

            for ind in range(0, batches):
                res = np.inner(document_vectors[current:current + batch_size], topic_vectors)

                if num_topics is None:
                    doc_top.extend(np.argmax(res, axis=1))
                    if dist:
                        doc_dist.extend(np.max(res, axis=1))
                else:
                    # flip(argsort) orders topics most- to least-similar
                    doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                    if dist:
                        doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])

                current += batch_size

            if extra > 0:
                # final partial batch
                res = np.inner(document_vectors[current:current + extra], topic_vectors)

                if num_topics is None:
                    doc_top.extend(np.argmax(res, axis=1))
                    if dist:
                        doc_dist.extend(np.max(res, axis=1))
                else:
                    doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                    if dist:
                        doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
            if dist:
                doc_dist = np.array(doc_dist)
        else:
            # single pass for small document sets
            res = np.inner(document_vectors, topic_vectors)

            if num_topics is None:
                doc_top = np.argmax(res, axis=1)
                if dist:
                    doc_dist = np.max(res, axis=1)
            else:
                doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                if dist:
                    doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])

        if num_topics is not None:
            doc_top = np.array(doc_top)
            if dist:
                doc_dist = np.array(doc_dist)

        if dist:
            return doc_top, doc_dist
        else:
            return doc_top
    def _find_topic_words_and_scores(self, topic_vectors):
        # For every topic keep the 50 words whose vectors have the
        # highest inner product with the topic vector, plus those scores.
        topic_words = []
        topic_word_scores = []

        res = np.inner(topic_vectors, self._get_word_vectors())
        top_words = np.flip(np.argsort(res, axis=1), axis=1)
        top_scores = np.flip(np.sort(res, axis=1), axis=1)

        for words, scores in zip(top_words, top_scores):
            topic_words.append([self._index2word(i) for i in words[0:50]])
            topic_word_scores.append(scores[0:50])

        topic_words = np.array(topic_words)
        topic_word_scores = np.array(topic_word_scores)

        return topic_words, topic_word_scores

    def _assign_documents_to_topic(self, document_vectors, hierarchy=False):
        # Incrementally assign newly added documents to their closest
        # (original or reduced) topic, then update topic sizes and order.
        if hierarchy:
            doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors_reduced,
                                                                        document_vectors,
                                                                        dist=True)
            self.doc_top_reduced = np.append(self.doc_top_reduced, doc_top_new)
            self.doc_dist_reduced = np.append(self.doc_dist_reduced, doc_dist_new)

            topic_sizes_new = pd.Series(doc_top_new).value_counts()
            for top in topic_sizes_new.index.tolist():
                self.topic_sizes_reduced[top] += topic_sizes_new[top]
            self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)

            self._reorder_topics(hierarchy)
        else:
            doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors, document_vectors, dist=True)
            self.doc_top = np.append(self.doc_top, doc_top_new)
            self.doc_dist = np.append(self.doc_dist, doc_dist_new)

            topic_sizes_new = pd.Series(doc_top_new).value_counts()
            for top in topic_sizes_new.index.tolist():
                self.topic_sizes[top] += topic_sizes_new[top]
            self.topic_sizes.sort_values(ascending=False, inplace=True)

            self._reorder_topics(hierarchy)

    def _unassign_documents_from_topic(self, doc_indexes, hierarchy=False):
        # Inverse of _assign_documents_to_topic: drop the given document
        # positions and decrement the affected topic sizes.
        if hierarchy:
            doc_top_remove = self.doc_top_reduced[doc_indexes]
            self.doc_top_reduced = np.delete(self.doc_top_reduced, doc_indexes, 0)
            self.doc_dist_reduced = np.delete(self.doc_dist_reduced, doc_indexes, 0)
            topic_sizes_remove = pd.Series(doc_top_remove).value_counts()

            for top in topic_sizes_remove.index.tolist():
                self.topic_sizes_reduced[top] -= topic_sizes_remove[top]
            self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)

            self._reorder_topics(hierarchy)
        else:
            doc_top_remove = self.doc_top[doc_indexes]
            self.doc_top = np.delete(self.doc_top, doc_indexes, 0)
            self.doc_dist = np.delete(self.doc_dist, doc_indexes, 0)
            topic_sizes_remove = pd.Series(doc_top_remove).value_counts()

            for top in topic_sizes_remove.index.tolist():
                self.topic_sizes[top] -= topic_sizes_remove[top]
            self.topic_sizes.sort_values(ascending=False, inplace=True)

            self._reorder_topics(hierarchy)
    def _get_document_ids(self, doc_index):
        # Positional index(es) -> user-facing document id(s).
        return self.document_ids[doc_index]

    def _get_document_indexes(self, doc_ids):
        # User-facing document id(s) -> positional index(es).
        if self.document_ids is None:
            return doc_ids
        else:
            return [self.doc_id2index[doc_id] for doc_id in doc_ids]

    def _words2word_vectors(self, keywords):
        # Look up the word vector of every keyword (row-select).
        return self._get_word_vectors()[[self._word2index(word) for word in keywords]]

    def _word2index(self, word):
        # Word string -> word-vector row index, for either backend.
        if self.embedding_model == 'doc2vec':
            return self.model.wv.vocab[word].index
        else:
            return self.word_indexes[word]

    def _get_combined_vec(self, vecs, vecs_neg):
        # Average the positive vectors minus the negative vectors and
        # L2-normalize the result (used for combined keyword/document
        # similarity queries).
        combined_vector = np.zeros(self._get_document_vectors().shape[1], dtype=np.float64)
        for vec in vecs:
            combined_vector += vec
        for vec in vecs_neg:
            combined_vector -= vec
        combined_vector /= (len(vecs) + len(vecs_neg))
        combined_vector = self._l2_normalize(combined_vector)
        return combined_vector
@staticmethod
def _search_vectors_by_vector(vectors, vector, num_res):
ranks = np.inner(vectors, vector)
indexes = np.flip(np.argsort(ranks)[-num_res:])
scores = np.array([ranks[res] for res in indexes])
return indexes, scores
    @staticmethod
    def _check_hnswlib_status():
        # hnswlib is an optional dependency; fail with install guidance.
        if not _HAVE_HNSWLIB:
            raise ImportError(f"Indexing is not available.\n\n"
                              "Try: pip install top2vec[indexing]\n\n"
                              "Alternatively try: pip install hnswlib")

    def _check_document_index_status(self):
        # Guard for search methods called with use_index=True before
        # index_document_vectors() has been run.
        if self.document_index is None:
            raise ImportError("There is no document index.\n\n"
                              "Call index_document_vectors method before setting use_index=True.")

    def _check_word_index_status(self):
        # Guard for search methods called with use_index=True before
        # index_word_vectors() has been run.
        if self.word_index is None:
            raise ImportError("There is no word index.\n\n"
                              "Call index_word_vectors method before setting use_index=True.")

    def _check_import_status(self):
        # Verify that the optional deep-learning backend required by the
        # chosen embedding model was importable at module load time
        # (tensorflow/hub for USE models, torch for sentence-transformers).
        if self.embedding_model != 'distiluse-base-multilingual-cased':
            if not _HAVE_TENSORFLOW:
                raise ImportError(f"{self.embedding_model} is not available.\n\n"
                                  "Try: pip install top2vec[sentence_encoders]\n\n"
                                  "Alternatively try: pip install tensorflow tensorflow_hub tensorflow_text")
        else:
            if not _HAVE_TORCH:
                raise ImportError(f"{self.embedding_model} is not available.\n\n"
                                  "Try: pip install top2vec[sentence_transformers]\n\n"
                                  "Alternatively try: pip install torch sentence_transformers")
    def _check_model_status(self):
        # Lazily download/load the pre-trained embedding model the first
        # time an embedding is needed. On success self.embed is a
        # callable mapping a list of strings to vectors.
        if self.embed is None:
            # temporarily enable progress logging even in quiet mode;
            # restored to WARNING at the end of this block
            if self.verbose is False:
                logger.setLevel(logging.DEBUG)

            if self.embedding_model != "distiluse-base-multilingual-cased":
                # TensorFlow-hub universal sentence encoders
                if self.embedding_model_path is None:
                    logger.info(f'Downloading {self.embedding_model} model')
                    if self.embedding_model == "universal-sentence-encoder-multilingual":
                        module = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
                    else:
                        module = "https://tfhub.dev/google/universal-sentence-encoder/4"
                else:
                    logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
                    module = self.embedding_model_path
                self.embed = hub.load(module)

            else:
                # sentence-transformers backend
                if self.embedding_model_path is None:
                    logger.info(f'Downloading {self.embedding_model} model')
                    module = 'distiluse-base-multilingual-cased'
                else:
                    logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
                    module = self.embedding_model_path
                model = SentenceTransformer(module)
                self.embed = model.encode

            if self.verbose is False:
                logger.setLevel(logging.WARNING)
@staticmethod
def _less_than_zero(num, var_name):
if num < 0:
raise ValueError(f"{var_name} cannot be less than 0.")
    def _validate_hierarchical_reduction(self):
        # Guard for methods that require hierarchical topic reduction to
        # have been performed first.
        if self.hierarchy is None:
            raise ValueError("Hierarchical topic reduction has not been performed.")

    def _validate_hierarchical_reduction_num_topics(self, num_topics):
        # A reduction must produce strictly fewer topics than currently exist.
        current_num_topics = len(self.topic_vectors)
        if num_topics >= current_num_topics:
            raise ValueError(f"Number of topics must be less than {current_num_topics}.")

    def _validate_num_docs(self, num_docs):
        # num_docs must lie in [0, total document count].
        self._less_than_zero(num_docs, "num_docs")
        document_count = len(self.doc_top)
        if num_docs > document_count:
            raise ValueError(f"num_docs cannot exceed the number of documents: {document_count}.")

    def _validate_num_topics(self, num_topics, reduced):
        # num_topics must lie in [0, topic count] for the chosen hierarchy.
        self._less_than_zero(num_topics, "num_topics")
        if reduced:
            topic_count = len(self.topic_vectors_reduced)
            if num_topics > topic_count:
                raise ValueError(f"num_topics cannot exceed the number of reduced topics: {topic_count}.")
        else:
            topic_count = len(self.topic_vectors)
            if num_topics > topic_count:
                raise ValueError(f"num_topics cannot exceed the number of topics: {topic_count}.")

    def _validate_topic_num(self, topic_num, reduced):
        # topic_num must be a valid 0-based topic index.
        self._less_than_zero(topic_num, "topic_num")
        if reduced:
            topic_count = len(self.topic_vectors_reduced) - 1
            if topic_num > topic_count:
                raise ValueError(f"Invalid topic number: valid reduced topics numbers are 0 to {topic_count}.")
        else:
            topic_count = len(self.topic_vectors) - 1
            if topic_num > topic_count:
                raise ValueError(f"Invalid topic number: valid original topics numbers are 0 to {topic_count}.")

    def _validate_topic_search(self, topic_num, num_docs, reduced):
        # Cannot request more documents from a topic than it contains.
        self._less_than_zero(num_docs, "num_docs")
        if reduced:
            if num_docs > self.topic_sizes_reduced[topic_num]:
                raise ValueError(f"Invalid number of documents: reduced topic {topic_num}"
                                 f" only has {self.topic_sizes_reduced[topic_num]} documents.")
        else:
            if num_docs > self.topic_sizes[topic_num]:
                raise ValueError(f"Invalid number of documents: original topic {topic_num}"
                                 f" only has {self.topic_sizes[topic_num]} documents.")
def _validate_doc_ids(self, doc_ids, doc_ids_neg):
if not (isinstance(doc_ids, list) or isinstance(doc_ids, np.ndarray)):
raise ValueError("doc_ids must be a list of string or int.")
if not (isinstance(doc_ids_neg, list) or isinstance(doc_ids_neg, np.ndarray)):
raise ValueError("doc_ids_neg must be a list of string or int.")
if isinstance(doc_ids, np.ndarray):
doc_ids = list(doc_ids)
if isinstance(doc_ids_neg, np.ndarray):
doc_ids_neg = list(doc_ids_neg)
doc_ids_all = doc_ids + doc_ids_neg
if self.document_ids is not None:
for doc_id in doc_ids_all:
if doc_id not in self.doc_id2index:
raise ValueError(f"{doc_id} is not a valid document id.")
elif min(doc_ids) < 0:
raise ValueError(f"{min(doc_ids)} is not a valid document id.")
elif max(doc_ids) > len(self.doc_top) - 1:
raise ValueError(f"{max(doc_ids)} is not a valid document id.")
def _validate_keywords(self, keywords, keywords_neg):
if not (isinstance(keywords, list) or isinstance(keywords, np.ndarray)):
raise ValueError("keywords must be a list of strings.")
if not (isinstance(keywords_neg, list) or isinstance(keywords_neg, np.ndarray)):
raise ValueError("keywords_neg must be a list of strings.")
keywords_lower = [keyword.lower() for keyword in keywords]
keywords_neg_lower = [keyword.lower() for keyword in keywords_neg]
if self.embedding_model == 'doc2vec':
vocab = self.model.wv.vocab
else:
vocab = self.vocab
for word in keywords_lower + keywords_neg_lower:
if word not in vocab:
raise ValueError(f"'{word}' has not been learned by the model so it cannot be searched.")
return keywords_lower, keywords_neg_lower
    def _validate_document_ids_add_doc(self, documents, document_ids):
        # Validate ids supplied when adding documents: must be present,
        # 1:1 with the documents, unique, new to the model, and of the
        # same type (str vs int) as the model's existing ids.
        if document_ids is None:
            raise ValueError("Document ids need to be provided.")
        if len(documents) != len(document_ids):
            raise ValueError("Document ids need to match number of documents.")
        if len(document_ids) != len(set(document_ids)):
            raise ValueError("Document ids need to be unique.")

        if len(set(document_ids).intersection(self.document_ids)) > 0:
            raise ValueError("Some document ids already exist in model.")

        if self.doc_id_type == np.str_:
            if not all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
                raise ValueError("Document ids need to be of type str.")

        if self.doc_id_type == np.int_:
            if not all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
                raise ValueError("Document ids need to be of type int.")

    @staticmethod
    def _validate_documents(documents):
        # Every document must be a plain or numpy string.
        if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
            raise ValueError("Documents need to be a list of strings.")
@staticmethod
def _validate_query(query):
if not isinstance(query, str) or isinstance(query, np.str_):
raise ValueError("Query needs to be a string.")
def _validate_vector(self, vector):
if not isinstance(vector, np.ndarray):
raise ValueError("Vector needs to be a numpy array.")
vec_size = self._get_document_vectors().shape[1]
if not vector.shape[0] == vec_size:
raise ValueError(f"Vector needs to be of {vec_size} dimensions.")
    def index_document_vectors(self, ef_construction=200, M=64):
        """
        Creates an index of the document vectors using hnswlib. This will
        lead to faster search times for models with a large number of
        documents.

        For more information on hnswlib see: https://github.com/nmslib/hnswlib

        Parameters
        ----------
        ef_construction: int (Optional default 200)
            This parameter controls the trade-off between index construction
            time and index accuracy. Larger values will lead to greater
            accuracy but will take longer to construct.

        M: int (Optional default 64)
            This parameter controls the trade-off between both index size as
            well as construction time and accuracy. Larger values will lead to
            greater accuracy but will result in a larger index as well as
            longer construction time.

            For more information on the parameters see:
            https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
        """

        self._check_hnswlib_status()

        document_vectors = self._get_document_vectors()
        vec_dim = document_vectors.shape[1]
        num_vecs = document_vectors.shape[0]

        # hnswlib labels are dense ints: keep bidirectional mappings
        # between index labels and user-facing document ids
        index_ids = list(range(0, len(self.document_ids)))

        self.index_id2doc_id = dict(zip(index_ids, self.document_ids))
        self.doc_id2index_id = dict(zip(self.document_ids, index_ids))

        # 'ip' (inner product) space matches the similarity measure used
        # by the non-indexed search paths (np.inner on these vectors)
        self.document_index = hnswlib.Index(space='ip', dim=vec_dim)
        self.document_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
        self.document_index.add_items(document_vectors, index_ids)
        self.documents_indexed = True

    def index_word_vectors(self, ef_construction=200, M=64):
        """
        Creates an index of the word vectors using hnswlib. This will
        lead to faster search times for models with a large number of
        words.

        For more information on hnswlib see: https://github.com/nmslib/hnswlib

        Parameters
        ----------
        ef_construction: int (Optional default 200)
            This parameter controls the trade-off between index construction
            time and index accuracy. Larger values will lead to greater
            accuracy but will take longer to construct.

        M: int (Optional default 64)
            This parameter controls the trade-off between both index size as
            well as construction time and accuracy. Larger values will lead to
            greater accuracy but will result in a larger index as well as
            longer construction time.

            For more information on the parameters see:
            https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
        """
        self._check_hnswlib_status()

        word_vectors = self._get_word_vectors()
        vec_dim = word_vectors.shape[1]
        num_vecs = word_vectors.shape[0]

        # word labels are simply the word-vector row positions
        index_ids = list(range(0, num_vecs))

        self.word_index = hnswlib.Index(space='ip', dim=vec_dim)
        self.word_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
        self.word_index.add_items(word_vectors, index_ids)
        self.words_indexed = True
def update_embedding_model_path(self, embedding_model_path):
"""
Update the path of the embedding model to be loaded. The model will
no longer be downloaded but loaded from the path location.
Warning: the model at embedding_model_path must match the
embedding_model parameter type.
Parameters
----------
embedding_model_path: Str
Path to downloaded embedding model.
"""
self.embedding_model_path = embedding_model_path
def change_to_download_embedding_model(self):
"""
Use automatic download to load embedding model used for training.
Top2Vec will no longer try and load the embedding model from a file
if a embedding_model path was previously added.
"""
self.embedding_model_path = None
def get_documents_topics(self, doc_ids, reduced=False, num_topics=1):
"""
Get document topics.
The topic of each document will be returned.
The corresponding original topics are returned unless reduced=True,
in which case the reduced topics will be returned.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to
documents in search results. If ids were not given to the model,
the index of each document in the model is the id.
reduced: bool (Optional, default False)
Original topics are returned by default. If True the
reduced topics will be returned.
num_topics: int (Optional, default 1)
The number of topics to return per document.
Returns
-------
topic_nums: array of int, shape(len(doc_ids), num_topics)
The topic number(s) of the document corresponding to each doc_id.
topic_score: array of float, shape(len(doc_ids), num_topics)
Semantic similarity of document to topic(s). The cosine similarity
of the document and topic vector.
topics_words: array of shape(len(doc_ids), num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 4>
['environment', 'warming', 'climate ... 'temperature'] <Topic 21>
...]
word_scores: array of shape(num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 4>
[0.7818', 0.7671, 0.7603 ... 0.6769] <Topic 21>
...]
"""
if reduced:
self._validate_hierarchical_reduction()
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
if num_topics == 1:
if reduced:
doc_topics = self.doc_top_reduced[doc_indexes]
doc_dist = self.doc_dist_reduced[doc_indexes]
topic_words = self.topic_words_reduced[doc_topics]
topic_word_scores = self.topic_word_scores_reduced[doc_topics]
else:
doc_topics = self.doc_top[doc_indexes]
doc_dist = self.doc_dist[doc_indexes]
topic_words = self.topic_words[doc_topics]
topic_word_scores = self.topic_word_scores[doc_topics]
else:
if reduced:
topic_vectors = self.topic_vectors_reduced
else:
topic_vectors = self.topic_vectors
doc_topics, doc_dist = self._calculate_documents_topic(topic_vectors,
self._get_document_vectors()[doc_indexes],
num_topics=num_topics)
topic_words = np.array([self.topic_words[topics] for topics in doc_topics])
topic_word_scores = np.array([self.topic_word_scores[topics] for topics in doc_topics])
return doc_topics, doc_dist, topic_words, topic_word_scores
    def add_documents(self, documents, doc_ids=None, tokenizer=None, use_embedding_model_tokenizer=False):
        """
        Update the model with new documents.

        The documents will be added to the current model without changing
        existing document, word and topic vectors. Topic sizes will be updated.

        If adding a large quantity of documents relative to the current model
        size, or documents containing a largely new vocabulary, a new model
        should be trained for best results.

        Parameters
        ----------
        documents: List of str

        doc_ids: List of str, int (Optional)
            Only required when doc_ids were given to the original model.
            A unique value per document that will be used for referring to
            documents in search results.

        tokenizer: callable (Optional, default None)
            Override the default tokenization method. If None then
            gensim.utils.simple_preprocess will be used.

        use_embedding_model_tokenizer: bool (Optional, default False)
            If using an embedding model other than doc2vec, use the model's
            tokenizer for document embedding.

        Raises
        ------
        ValueError
            If doc_ids are given but the model was trained without them,
            or the new documents/ids fail validation.
        """
        # if tokenizer is not passed use default
        if tokenizer is None:
            tokenizer = default_tokenizer
        # add documents
        self._validate_documents(documents)
        if self.documents is not None:
            self.documents = np.append(self.documents, documents)
        # add document ids
        if self.document_ids_provided is True:
            # caller must supply ids consistent with the original training ids
            self._validate_document_ids_add_doc(documents, doc_ids)
            doc_ids_len = len(self.document_ids)
            self.document_ids = np.append(self.document_ids, doc_ids)
            # new documents occupy the next contiguous index positions
            self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
        elif doc_ids is None:
            # auto-generate sequential integer ids continuing past the max
            num_docs = len(documents)
            start_id = max(self.document_ids) + 1
            doc_ids = list(range(start_id, start_id + num_docs))
            doc_ids_len = len(self.document_ids)
            self.document_ids = np.append(self.document_ids, doc_ids)
            self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
        else:
            raise ValueError("doc_ids cannot be used because they were not provided to model during training.")
        if self.embedding_model == "doc2vec":
            # doc2vec has no text encoder: infer a vector per tokenized document
            docs_processed = [tokenizer(doc) for doc in documents]
            document_vectors = np.vstack([self.model.infer_vector(doc_words=doc,
                                                                  alpha=0.025,
                                                                  min_alpha=0.01,
                                                                  epochs=100) for doc in docs_processed])
            num_docs = len(documents)
            # NOTE(review): the following mutate gensim doc2vec internals
            # (docvecs.count / max_rawint / vectors_docs_norm) — these
            # attributes are specific to gensim < 4.0; verify against the
            # pinned gensim version.
            self.model.docvecs.count += num_docs
            self.model.docvecs.max_rawint += num_docs
            self.model.docvecs.vectors_docs_norm = None
            self._set_document_vectors(np.vstack([self._get_document_vectors(norm=False), document_vectors]))
            self.model.docvecs.init_sims()
            document_vectors = self._l2_normalize(document_vectors)
        else:
            if use_embedding_model_tokenizer:
                # the embedding model tokenizes internally; pass raw text
                docs_training = documents
            else:
                docs_processed = [tokenizer(doc) for doc in documents]
                docs_training = [' '.join(doc) for doc in docs_processed]
            document_vectors = self._embed_documents(docs_training)
            self._set_document_vectors(np.vstack([self._get_document_vectors(), document_vectors]))
        # update index
        if self.documents_indexed:
            # update capacity of index
            current_max = self.document_index.get_max_elements()
            updated_max = current_max + len(documents)
            self.document_index.resize_index(updated_max)
            # update index_id and doc_ids
            start_index_id = max(self.index_id2doc_id.keys()) + 1
            new_index_ids = list(range(start_index_id, start_index_id + len(doc_ids)))
            self.index_id2doc_id.update(dict(zip(new_index_ids, doc_ids)))
            self.doc_id2index_id.update(dict(zip(doc_ids, new_index_ids)))
            self.document_index.add_items(document_vectors, new_index_ids)
        # update topics
        self._assign_documents_to_topic(document_vectors, hierarchy=False)
        if self.hierarchy is not None:
            self._assign_documents_to_topic(document_vectors, hierarchy=True)
def delete_documents(self, doc_ids):
"""
Delete documents from current model.
Warning: If document ids were not used in original model, deleting
documents will change the indexes and therefore doc_ids.
The documents will be deleted from the current model without changing
existing document, word and topic vectors. Topic sizes will be updated.
If deleting a large quantity of documents relative to the current model
size a new model should be trained for best results.
Parameters
----------
doc_ids: List of str, int
A unique value per document that is used for referring to documents
in search results.
"""
# make sure documents exist
self._validate_doc_ids(doc_ids, doc_ids_neg=[])
# update index
if self.documents_indexed:
# delete doc_ids from index
index_ids = [self.doc_id2index_id(doc_id) for doc_id in doc_ids]
for index_id in index_ids:
self.document_index.mark_deleted(index_id)
# update index_id and doc_ids
for doc_id in doc_ids:
self.doc_id2index_id.pop(doc_id)
for index_id in index_ids:
self.index_id2doc_id.pop(index_id)
# get document indexes from ids
doc_indexes = self._get_document_indexes(doc_ids)
# delete documents
if self.documents is not None:
self.documents = np.delete(self.documents, doc_indexes, 0)
# delete document ids
if self.document_ids is not None:
for doc_id in doc_ids:
self.doc_id2index.pop(doc_id)
keys = list(self.doc_id2index.keys())
self.document_ids = np.array(keys)
values = list(range(0, len(self.doc_id2index.values())))
self.doc_id2index = dict(zip(keys, values))
# delete document vectors
self._set_document_vectors(np.delete(self._get_document_vectors(norm=False), doc_indexes, 0))
if self.embedding_model == 'doc2vec':
num_docs = len(doc_indexes)
self.model.docvecs.count -= num_docs
self.model.docvecs.max_rawint -= num_docs
self.model.docvecs.vectors_docs_norm = None
self.model.docvecs.init_sims()
# update topics
self._unassign_documents_from_topic(doc_indexes, hierarchy=False)
if self.hierarchy is not None:
self._unassign_documents_from_topic(doc_indexes, hierarchy=True)
def get_num_topics(self, reduced=False):
"""
Get number of topics.
This is the number of topics Top2Vec has found in the data by default.
If reduced is True, the number of reduced topics is returned.
Parameters
----------
reduced: bool (Optional, default False)
The number of original topics will be returned by default. If True
will return the number of reduced topics, if hierarchical topic
reduction has been performed.
Returns
-------
num_topics: int
"""
if reduced:
self._validate_hierarchical_reduction()
return len(self.topic_vectors_reduced)
else:
return len(self.topic_vectors)
def get_topic_sizes(self, reduced=False):
"""
Get topic sizes.
The number of documents most similar to each topic. Topics are
in increasing order of size.
The sizes of the original topics is returned unless reduced=True,
in which case the sizes of the reduced topics will be returned.
Parameters
----------
reduced: bool (Optional, default False)
Original topic sizes are returned by default. If True the
reduced topic sizes will be returned.
Returns
-------
topic_sizes: array of int, shape(num_topics)
The number of documents most similar to the topic.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
if reduced:
self._validate_hierarchical_reduction()
return np.array(self.topic_sizes_reduced.values), np.array(self.topic_sizes_reduced.index)
else:
return np.array(self.topic_sizes.values), np.array(self.topic_sizes.index)
def get_topics(self, num_topics=None, reduced=False):
"""
Get topics, ordered by decreasing size. All topics are returned
if num_topics is not specified.
The original topics found are returned unless reduced=True,
in which case reduced topics will be returned.
Each topic will consist of the top 50 semantically similar words
to the topic. These are the 50 words closest to topic vector
along with cosine similarity of each word from vector. The
higher the score the more relevant the word is to the topic.
Parameters
----------
num_topics: int, (Optional)
Number of topics to return.
reduced: bool (Optional, default False)
Original topics are returned by default. If True the
reduced topics will be returned.
Returns
-------
topics_words: array of shape(num_topics, 50)
For each topic the top 50 words are returned, in order
of semantic similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
['environment', 'warming', 'climate ... 'temperature'] <Topic 1>
...]
word_scores: array of shape(num_topics, 50)
For each topic the cosine similarity scores of the
top 50 words to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
[0.7818', 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
if reduced:
self._validate_hierarchical_reduction()
if num_topics is None:
num_topics = len(self.topic_vectors_reduced)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words_reduced[0:num_topics], self.topic_word_scores_reduced[0:num_topics], np.array(
range(0, num_topics))
else:
if num_topics is None:
num_topics = len(self.topic_vectors)
else:
self._validate_num_topics(num_topics, reduced)
return self.topic_words[0:num_topics], self.topic_word_scores[0:num_topics], np.array(range(0, num_topics))
def get_topic_hierarchy(self):
"""
Get the hierarchy of reduced topics. The mapping of each original topic
to the reduced topics is returned.
Hierarchical topic reduction must be performed before calling this
method.
Returns
-------
hierarchy: list of ints
Each index of the hierarchy corresponds to the topic number of a
reduced topic. For each reduced topic the topic numbers of the
original topics that were merged to create it are listed.
Example:
[[3] <Reduced Topic 0> contains original Topic 3
[2,4] <Reduced Topic 1> contains original Topics 2 and 4
[0,1] <Reduced Topic 3> contains original Topics 0 and 1
...]
"""
self._validate_hierarchical_reduction()
return self.hierarchy
    def hierarchical_topic_reduction(self, num_topics):
        """
        Reduce the number of topics discovered by Top2Vec.

        The most representative topics of the corpus will be found, by
        iteratively merging each smallest topic to the most similar topic until
        num_topics is reached.

        Parameters
        ----------
        num_topics: int
            The number of topics to reduce to.

        Returns
        -------
        hierarchy: list of ints
            Each index of hierarchy corresponds to the reduced topics, for each
            reduced topic the indexes of the original topics that were merged
            to create it are listed.

            Example:
            [[3]  <Reduced Topic 0> contains original Topic 3
            [2,4] <Reduced Topic 1> contains original Topics 2 and 4
            [0,1] <Reduced Topic 3> contains original Topics 0 and 1
            ...]
        """
        self._validate_hierarchical_reduction_num_topics(num_topics)
        num_topics_current = self.topic_vectors.shape[0]
        top_vecs = self.topic_vectors
        top_sizes = [self.topic_sizes[i] for i in range(0, len(self.topic_sizes))]
        hierarchy = [[i] for i in range(self.topic_vectors.shape[0])]
        count = 0
        # Exact topic sizes are only recomputed every `interval` merges,
        # because re-assigning every document to a topic is expensive for
        # large corpora; in between, sizes are approximated by summing the
        # merged topics' sizes.
        interval = max(int(self._get_document_vectors().shape[0] / 50000), 1)
        while num_topics_current > num_topics:
            # find smallest and most similar topics
            smallest = np.argmin(top_sizes)
            res = np.inner(top_vecs[smallest], top_vecs)
            sims = np.flip(np.argsort(res))
            # sims[0] is normally `smallest` itself (similarity with itself);
            # sims[1] is then the most similar *other* topic. The fallback
            # handles the tie case where `smallest` lands at sims[1].
            most_sim = sims[1]
            if most_sim == smallest:
                most_sim = sims[0]
            # calculate combined topic vector
            # (size-weighted mean of the two merged topic vectors)
            top_vec_smallest = top_vecs[smallest]
            smallest_size = top_sizes[smallest]
            top_vec_most_sim = top_vecs[most_sim]
            most_sim_size = top_sizes[most_sim]
            combined_vec = self._l2_normalize(((top_vec_smallest * smallest_size) +
                                               (top_vec_most_sim * most_sim_size)) / (smallest_size + most_sim_size))
            # update topic vectors: drop the two merged topics and append
            # the combined one at the end
            ix_keep = list(range(len(top_vecs)))
            ix_keep.remove(smallest)
            ix_keep.remove(most_sim)
            top_vecs = top_vecs[ix_keep]
            top_vecs = np.vstack([top_vecs, combined_vec])
            num_topics_current = top_vecs.shape[0]
            # update topics sizes
            if count % interval == 0:
                # periodic exact recount by re-assigning all documents
                doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
                                                          document_vectors=self._get_document_vectors(),
                                                          dist=False)
                topic_sizes = pd.Series(doc_top).value_counts()
                top_sizes = [topic_sizes[i] for i in range(0, len(topic_sizes))]
            else:
                # cheap approximation: the pop order matters — removing
                # `smallest` first shifts indexes above it down by one
                smallest_size = top_sizes.pop(smallest)
                if most_sim < smallest:
                    most_sim_size = top_sizes.pop(most_sim)
                else:
                    most_sim_size = top_sizes.pop(most_sim - 1)
                combined_size = smallest_size + most_sim_size
                top_sizes.append(combined_size)
            count += 1
            # update topic hierarchy (same pop-order adjustment as above)
            smallest_inds = hierarchy.pop(smallest)
            if most_sim < smallest:
                most_sim_inds = hierarchy.pop(most_sim)
            else:
                most_sim_inds = hierarchy.pop(most_sim - 1)
            combined_inds = smallest_inds + most_sim_inds
            hierarchy.append(combined_inds)
        # re-calculate topic vectors from clusters
        doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
                                                  document_vectors=self._get_document_vectors(),
                                                  dist=False)
        self.topic_vectors_reduced = self._l2_normalize(np.vstack([self._get_document_vectors()
                                                                   [np.where(doc_top == label)[0]]
                                                                  .mean(axis=0) for label in set(doc_top)]))
        self.hierarchy = hierarchy
        # assign documents to topic
        self.doc_top_reduced, self.doc_dist_reduced = self._calculate_documents_topic(self.topic_vectors_reduced,
                                                                                      self._get_document_vectors())
        # find topic words and scores
        self.topic_words_reduced, self.topic_word_scores_reduced = self._find_topic_words_and_scores(
            topic_vectors=self.topic_vectors_reduced)
        # calculate topic sizes
        self.topic_sizes_reduced = self._calculate_topic_sizes(hierarchy=True)
        # re-order topics
        self._reorder_topics(hierarchy=True)
        return self.hierarchy
def query_documents(self, query, num_docs, return_documents=True, use_index=False, ef=None, tokenizer=None):
"""
Semantic search of documents using a text query.
The most semantically similar documents to the query will be returned.
Parameters
----------
query: string
Any sequence of text. This could be an actual question, a sentence,
a paragraph or a document.
num_docs: int
Number of documents to return.
return_documents: bool (Optional default True)
Determines if the documents will be returned. If they were not
saved in the model they will not be returned.
use_index: bool (Optional default False)
If index_documents method has been called, setting this to True
will speed up search for models with large number of documents.
ef: int (Optional default None)
Higher ef leads to more accurate but slower search. This value
must be higher than num_docs.
For more information see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
tokenizer: callable (Optional, default None)
** For doc2vec embedding model only **
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
Returns
-------
documents: (Optional) array of str, shape(num_docs)
The documents in a list, the most similar are first.
Will only be returned if the documents were saved and if
return_documents is set to True.
doc_scores: array of float, shape(num_docs)
Semantic similarity of document to vector. The cosine similarity of
the document and vector.
doc_ids: array of int, shape(num_docs)
Unique ids of documents. If ids were not given to the model, the
index of the document in the model will be returned.
"""
self._validate_query(query)
self._validate_num_docs(num_docs)
if self.embedding_model != "doc2vec":
query_vec = self._embed_query(query)
else:
# if tokenizer is not passed use default
if tokenizer is None:
tokenizer = default_tokenizer
tokenized_query = tokenizer(query)
query_vec = self.model.infer_vector(doc_words=tokenized_query,
alpha=0.025,
min_alpha=0.01,
epochs=100)
return self.search_documents_by_vector(query_vec, num_docs, return_documents=return_documents,
use_index=use_index, ef=ef)
def query_topics(self, query, num_topics, reduced=False, tokenizer=None):
"""
Semantic search of topics using text query.
These are the topics closest to the vector. Topics are ordered by
proximity to the vector. Successive topics in the list are less
semantically similar to the vector.
Parameters
----------
query: string
Any sequence of text. This could be an actual question, a sentence,
a paragraph or a document.
num_topics: int
Number of documents to return.
reduced: bool (Optional, default False)
Original topics are searched by default. If True the
reduced topics will be searched.
tokenizer: callable (Optional, default None)
** For doc2vec embedding model only **
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
Returns
-------
topics_words: array of shape (num_topics, 50)
For each topic the top 50 words are returned, in order of semantic
similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
['environment', 'warming', 'climate ... 'temperature'] <Topic 1>
...]
word_scores: array of shape (num_topics, 50)
For each topic the cosine similarity scores of the top 50 words
to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
[0.7818', 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_scores: array of float, shape(num_topics)
For each topic the cosine similarity to the search keywords will be
returned.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
self._validate_query(query)
if self.embedding_model != "doc2vec":
query_vec = self._embed_query(query)
else:
# if tokenizer is not passed use default
if tokenizer is None:
tokenizer = default_tokenizer
tokenized_query = tokenizer(query)
query_vec = self.model.infer_vector(doc_words=tokenized_query,
alpha=0.025,
min_alpha=0.01,
epochs=100)
return self.search_topics_by_vector(query_vec, num_topics=num_topics, reduced=reduced)
def search_documents_by_vector(self, vector, num_docs, return_documents=True, use_index=False, ef=None):
"""
Semantic search of documents using a vector.
These are the documents closest to the vector. Documents are
ordered by proximity to the vector. Successive documents in the
list are less semantically similar to the vector.
Parameters
----------
vector: array of shape(vector dimension, 1)
The vector dimension should be the same as the vectors in
the topic_vectors variable. (i.e. model.topic_vectors.shape[1])
num_docs: int
Number of documents to return.
return_documents: bool (Optional default True)
Determines if the documents will be returned. If they were not
saved in the model they will not be returned.
use_index: bool (Optional default False)
If index_documents method has been called, setting this to True
will speed up search for models with large number of documents.
ef: int (Optional default None)
Higher ef leads to more accurate but slower search. This value
must be higher than num_docs.
For more information see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
Returns
-------
documents: (Optional) array of str, shape(num_docs)
The documents in a list, the most similar are first.
Will only be returned if the documents were saved and if
return_documents is set to True.
doc_scores: array of float, shape(num_docs)
Semantic similarity of document to vector. The cosine similarity of
the document and vector.
doc_ids: array of int, shape(num_docs)
Unique ids of documents. If ids were not given to the model, the
index of the document in the model will be returned.
"""
self._validate_vector(vector)
self._validate_num_docs(num_docs)
vector = self._l2_normalize(vector)
if use_index:
self._check_document_index_status()
if ef is not None:
self.document_index.set_ef(ef)
else:
self.document_index.set_ef(num_docs)
index_ids, doc_scores = self.document_index.knn_query(vector, k=num_docs)
index_ids = index_ids[0]
doc_ids = np.array([self.index_id2doc_id[index_id] for index_id in index_ids])
doc_scores = doc_scores[0]
doc_scores = np.array([1 - score for score in doc_scores])
doc_indexes = self._get_document_indexes(doc_ids)
else:
doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
vector, num_docs)
doc_ids = self._get_document_ids(doc_indexes)
if self.documents is not None and return_documents:
documents = self.documents[doc_indexes]
return documents, doc_scores, doc_ids
else:
return doc_scores, doc_ids
def search_words_by_vector(self, vector, num_words, use_index=False, ef=None):
"""
Semantic search of words using a vector.
These are the words closest to the vector. Words are ordered by
proximity to the vector. Successive words in the list are less
semantically similar to the vector.
Parameters
----------
vector: array of shape(vector dimension, 1)
The vector dimension should be the same as the vectors in
the topic_vectors variable. (i.e. model.topic_vectors.shape[1])
num_words: int
Number of words to return.
use_index: bool (Optional default False)
If index_words method has been called, setting this to True will
speed up search for models with large number of words.
ef: int (Optional default None)
Higher ef leads to more accurate but slower search. This value
must be higher than num_docs.
For more information see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
Returns
-------
words: array of str, shape(num_words)
The words in a list, the most similar are first.
word_scores: array of float, shape(num_words)
Semantic similarity of word to vector. The cosine similarity of
the word and vector.
"""
self._validate_vector(vector)
vector = self._l2_normalize(vector)
if use_index:
self._check_word_index_status()
if ef is not None:
self.word_index.set_ef(ef)
else:
self.word_index.set_ef(num_words)
word_indexes, word_scores = self.word_index.knn_query(vector, k=num_words)
word_indexes = word_indexes[0]
word_scores = word_scores[0]
word_scores = np.array([1 - score for score in word_scores])
else:
word_indexes, word_scores = self._search_vectors_by_vector(self._get_word_vectors(),
vector, num_words)
words = np.array([self._index2word(index) for index in word_indexes])
return words, word_scores
def search_topics_by_vector(self, vector, num_topics, reduced=False):
"""
Semantic search of topics using keywords.
These are the topics closest to the vector. Topics are ordered by
proximity to the vector. Successive topics in the list are less
semantically similar to the vector.
Parameters
----------
vector: array of shape(vector dimension, 1)
The vector dimension should be the same as the vectors in
the topic_vectors variable. (i.e. model.topic_vectors.shape[1])
num_topics: int
Number of documents to return.
reduced: bool (Optional, default False)
Original topics are searched by default. If True the
reduced topics will be searched.
Returns
-------
topics_words: array of shape (num_topics, 50)
For each topic the top 50 words are returned, in order of semantic
similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
['environment', 'warming', 'climate ... 'temperature'] <Topic 1>
...]
word_scores: array of shape (num_topics, 50)
For each topic the cosine similarity scores of the top 50 words
to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
[0.7818', 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_scores: array of float, shape(num_topics)
For each topic the cosine similarity to the search keywords will be
returned.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
self._validate_vector(vector)
self._validate_num_topics(num_topics, reduced)
vector = self._l2_normalize(vector)
if reduced:
self._validate_hierarchical_reduction()
topic_nums, topic_scores = self._search_vectors_by_vector(self.topic_vectors_reduced,
vector, num_topics)
topic_words = [self.topic_words_reduced[topic] for topic in topic_nums]
word_scores = [self.topic_word_scores_reduced[topic] for topic in topic_nums]
else:
topic_nums, topic_scores = self._search_vectors_by_vector(self.topic_vectors,
vector, num_topics)
topic_words = [self.topic_words[topic] for topic in topic_nums]
word_scores = [self.topic_word_scores[topic] for topic in topic_nums]
return topic_words, word_scores, topic_scores, topic_nums
def search_documents_by_topic(self, topic_num, num_docs, return_documents=True, reduced=False):
"""
Get the most semantically similar documents to the topic.
These are the documents closest to the topic vector. Documents are
ordered by proximity to the topic vector. Successive documents in the
list are less semantically similar to the topic.
Parameters
----------
topic_num: int
The topic number to search.
num_docs: int
Number of documents to return.
return_documents: bool (Optional default True)
Determines if the documents will be returned. If they were not
saved in the model they will not be returned.
reduced: bool (Optional, default False)
Original topics are used to search by default. If True the
reduced topics will be used.
Returns
-------
documents: (Optional) array of str, shape(num_docs)
The documents in a list, the most similar are first.
Will only be returned if the documents were saved and if
return_documents is set to True.
doc_scores: array of float, shape(num_docs)
Semantic similarity of document to topic. The cosine similarity of
the document and topic vector.
doc_ids: array of int, shape(num_docs)
Unique ids of documents. If ids were not given to the model, the
index of the document in the model will be returned.
"""
if reduced:
self._validate_hierarchical_reduction()
self._validate_topic_num(topic_num, reduced)
self._validate_topic_search(topic_num, num_docs, reduced)
topic_document_indexes = np.where(self.doc_top_reduced == topic_num)[0]
topic_document_indexes_ordered = np.flip(np.argsort(self.doc_dist_reduced[topic_document_indexes]))
doc_indexes = topic_document_indexes[topic_document_indexes_ordered][0:num_docs]
doc_scores = self.doc_dist_reduced[doc_indexes]
doc_ids = self._get_document_ids(doc_indexes)
else:
self._validate_topic_num(topic_num, reduced)
self._validate_topic_search(topic_num, num_docs, reduced)
topic_document_indexes = np.where(self.doc_top == topic_num)[0]
topic_document_indexes_ordered = np.flip(np.argsort(self.doc_dist[topic_document_indexes]))
doc_indexes = topic_document_indexes[topic_document_indexes_ordered][0:num_docs]
doc_scores = self.doc_dist[doc_indexes]
doc_ids = self._get_document_ids(doc_indexes)
if self.documents is not None and return_documents:
documents = self.documents[doc_indexes]
return documents, doc_scores, doc_ids
else:
return doc_scores, doc_ids
def search_documents_by_keywords(self, keywords, num_docs, keywords_neg=None, return_documents=True,
use_index=False, ef=None):
"""
Semantic search of documents using keywords.
The most semantically similar documents to the combination of the
keywords will be returned. If negative keywords are provided, the
documents will be semantically dissimilar to those words. Too many
keywords or certain combinations of words may give strange results.
This method finds an average vector(negative keywords are subtracted)
of all the keyword vectors and returns the documents closest to the
resulting vector.
Parameters
----------
keywords: List of str
List of positive keywords being used for search of semantically
similar documents.
keywords_neg: List of str (Optional)
List of negative keywords being used for search of semantically
dissimilar documents.
num_docs: int
Number of documents to return.
return_documents: bool (Optional default True)
Determines if the documents will be returned. If they were not
saved in the model they will also not be returned.
use_index: bool (Optional default False)
If index_documents method has been called, setting this to True
will speed up search for models with large number of documents.
ef: int (Optional default None)
Higher ef leads to more accurate but slower search. This value
must be higher than num_docs.
For more information see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
Returns
-------
documents: (Optional) array of str, shape(num_docs)
The documents in a list, the most similar are first.
Will only be returned if the documents were saved and if
return_documents is set to True.
doc_scores: array of float, shape(num_docs)
Semantic similarity of document to keywords. The cosine similarity
of the document and average of keyword vectors.
doc_ids: array of int, shape(num_docs)
Unique ids of documents. If ids were not given to the model, the
index of the document in the model will be returned.
"""
if keywords_neg is None:
keywords_neg = []
self._validate_num_docs(num_docs)
keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
word_vecs = self._words2word_vectors(keywords)
neg_word_vecs = self._words2word_vectors(keywords_neg)
if use_index:
self._check_document_index_status()
combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
return self.search_documents_by_vector(combined_vector, num_docs, return_documents=return_documents,
use_index=True, ef=ef)
if self.embedding_model == 'doc2vec':
sim_docs = self.model.docvecs.most_similar(positive=word_vecs,
negative=neg_word_vecs,
topn=num_docs)
doc_indexes = [doc[0] for doc in sim_docs]
doc_scores = np.array([doc[1] for doc in sim_docs])
else:
combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
combined_vector, num_docs)
doc_ids = self._get_document_ids(doc_indexes)
if self.documents is not None and return_documents:
documents = self.documents[doc_indexes]
return documents, doc_scores, doc_ids
else:
return doc_scores, doc_ids
def similar_words(self, keywords, num_words, keywords_neg=None, use_index=False, ef=None):
"""
Semantic similarity search of words.
The most semantically similar word to the combination of the keywords
will be returned. If negative keywords are provided, the words will be
semantically dissimilar to those words. Too many keywords or certain
combinations of words may give strange results. This method finds an
average vector(negative keywords are subtracted) of all the keyword
vectors and returns the words closest to the resulting vector.
Parameters
----------
keywords: List of str
List of positive keywords being used for search of semantically
similar words.
keywords_neg: List of str
List of negative keywords being used for search of semantically
dissimilar words.
num_words: int
Number of words to return.
use_index: bool (Optional default False)
If index_words method has been called, setting this to True will
speed up search for models with large number of words.
ef: int (Optional default None)
Higher ef leads to more accurate but slower search. This value
must be higher than num_docs.
For more information see:
https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
Returns
-------
words: array of str, shape(num_words)
The words in a list, the most similar are first.
word_scores: array of float, shape(num_words)
Semantic similarity of word to keywords. The cosine similarity of
the word and average of keyword vectors.
"""
if keywords_neg is None:
keywords_neg = []
keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
word_vecs = self._words2word_vectors(keywords)
neg_word_vecs = self._words2word_vectors(keywords_neg)
combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
num_res = min(num_words + len(keywords) + len(keywords_neg), self._get_word_vectors().shape[0])
# if use_index:
words, word_scores = self.search_words_by_vector(vector=combined_vector,
num_words=num_res,
use_index=use_index,
ef=ef)
res_indexes = [index for index, word in enumerate(words)
if word not in list(keywords) + list(keywords_neg)][:num_words]
words = words[res_indexes]
word_scores = word_scores[res_indexes]
return words, word_scores
def search_topics(self, keywords, num_topics, keywords_neg=None, reduced=False):
"""
Semantic search of topics using keywords.
The most semantically similar topics to the combination of the keywords
will be returned. If negative keywords are provided, the topics will be
semantically dissimilar to those words. Topics will be ordered by
decreasing similarity to the keywords. Too many keywords or certain
combinations of words may give strange results. This method finds an
average vector(negative keywords are subtracted) of all the keyword
vectors and returns the topics closest to the resulting vector.
Parameters
----------
keywords: List of str
List of positive keywords being used for search of semantically
similar documents.
keywords_neg: (Optional) List of str
List of negative keywords being used for search of semantically
dissimilar documents.
num_topics: int
Number of documents to return.
reduced: bool (Optional, default False)
Original topics are searched by default. If True the
reduced topics will be searched.
Returns
-------
topics_words: array of shape (num_topics, 50)
For each topic the top 50 words are returned, in order of semantic
similarity to topic.
Example:
[['data', 'deep', 'learning' ... 'artificial'], <Topic 0>
['environment', 'warming', 'climate ... 'temperature'] <Topic 1>
...]
word_scores: array of shape (num_topics, 50)
For each topic the cosine similarity scores of the top 50 words
to the topic are returned.
Example:
[[0.7132, 0.6473, 0.5700 ... 0.3455], <Topic 0>
[0.7818', 0.7671, 0.7603 ... 0.6769] <Topic 1>
...]
topic_scores: array of float, shape(num_topics)
For each topic the cosine similarity to the search keywords will be
returned.
topic_nums: array of int, shape(num_topics)
The unique number of every topic will be returned.
"""
if keywords_neg is None:
keywords_neg = []
keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
word_vecs = self._words2word_vectors(keywords)
neg_word_vecs = self._words2word_vectors(keywords_neg)
combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
return self.search_topics_by_vector(combined_vector, num_topics=num_topics, reduced=reduced)
    def search_documents_by_documents(self, doc_ids, num_docs, doc_ids_neg=None, return_documents=True,
                                      use_index=False, ef=None):
        """
        Semantic similarity search of documents.
        The most semantically similar documents to the semantic combination of
        document ids provided will be returned. If negative document ids are
        provided, the documents will be semantically dissimilar to those
        document ids. Documents will be ordered by decreasing similarity. This
        method finds the closest document vectors to the provided documents
        averaged.
        Parameters
        ----------
        doc_ids: List of int, str
            Unique ids of document. If ids were not given, the index of
            document in the original corpus.
        doc_ids_neg: (Optional) List of int, str
            Unique ids of document. If ids were not given, the index of
            document in the original corpus.
        num_docs: int
            Number of documents to return.
        return_documents: bool (Optional default True)
            Determines if the documents will be returned. If they were not
            saved in the model they will also not be returned.
        use_index: bool (Optional default False)
            If index_documents method has been called, setting this to True
            will speed up search for models with large number of documents.
        ef: int (Optional default None)
            Higher ef leads to more accurate but slower search. This value
            must be higher than num_docs.
            For more information see:
            https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
        Returns
        -------
        documents: (Optional) array of str, shape(num_docs)
            The documents in a list, the most similar are first.
            Will only be returned if the documents were saved and if
            return_documents is set to True.
        doc_scores: array of float, shape(num_docs)
            Semantic similarity of document to keywords. The cosine similarity
            of the document and average of keyword vectors.
        doc_ids: array of int, shape(num_docs)
            Unique ids of documents. If ids were not given to the model, the
            index of the document in the model will be returned.
        """
        if doc_ids_neg is None:
            doc_ids_neg = []
        self._validate_num_docs(num_docs)
        self._validate_doc_ids(doc_ids, doc_ids_neg)
        # Map user-facing document ids to internal vector indexes.
        doc_indexes = self._get_document_indexes(doc_ids)
        doc_indexes_neg = self._get_document_indexes(doc_ids_neg)
        if use_index:
            # Index path: combine the document vectors and let the
            # hnswlib-backed search handle the lookup (it also excludes
            # nothing extra here; exclusion of the queries is done by the
            # over-fetch in search_documents_by_vector's caller contract).
            self._check_document_index_status()
            document_vectors = self._get_document_vectors()
            doc_vecs = [document_vectors[ind] for ind in doc_indexes]
            doc_vecs_neg = [document_vectors[ind] for ind in doc_indexes_neg]
            combined_vector = self._get_combined_vec(doc_vecs, doc_vecs_neg)
            return self.search_documents_by_vector(combined_vector, num_docs, return_documents=return_documents,
                                                   use_index=True, ef=ef)
        if self.embedding_model == 'doc2vec':
            # gensim accepts raw indexes and performs the combined search.
            sim_docs = self.model.docvecs.most_similar(positive=doc_indexes,
                                                       negative=doc_indexes_neg,
                                                       topn=num_docs)
            doc_indexes = [doc[0] for doc in sim_docs]
            doc_scores = np.array([doc[1] for doc in sim_docs])
        else:
            doc_vecs = [self.document_vectors[ind] for ind in doc_indexes]
            doc_vecs_neg = [self.document_vectors[ind] for ind in doc_indexes_neg]
            combined_vector = self._get_combined_vec(doc_vecs, doc_vecs_neg)
            # Over-fetch so num_docs results remain after removing the query
            # documents themselves from the hits.
            num_res = min(num_docs + len(doc_indexes) + len(doc_indexes_neg),
                          self._get_document_vectors().shape[0])
            # don't return documents that were searched
            search_doc_indexes = list(doc_indexes) + list(doc_indexes_neg)
            # NOTE: doc_indexes is rebound below to the result indexes; the
            # original query indexes were saved in search_doc_indexes above.
            doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
                                                                     combined_vector, num_res)
            res_indexes = [index for index, doc_ind in enumerate(doc_indexes)
                           if doc_ind not in search_doc_indexes][:num_docs]
            doc_indexes = doc_indexes[res_indexes]
            doc_scores = doc_scores[res_indexes]
        doc_ids = self._get_document_ids(doc_indexes)
        if self.documents is not None and return_documents:
            documents = self.documents[doc_indexes]
            return documents, doc_scores, doc_ids
        else:
            return doc_scores, doc_ids
def generate_topic_wordcloud(self, topic_num, background_color="black", reduced=False):
"""
Create a word cloud for a topic.
A word cloud will be generated and displayed. The most semantically
similar words to the topic will have the largest size, less similar
words will be smaller. The size is determined using the cosine distance
of the word vectors from the topic vector.
Parameters
----------
topic_num: int
The topic number to search.
background_color : str (Optional, default='white')
Background color for the word cloud image. Suggested options are:
* white
* black
reduced: bool (Optional, default False)
Original topics are used by default. If True the
reduced topics will be used.
Returns
-------
A matplotlib plot of the word cloud with the topic number will be
displayed.
"""
if reduced:
self._validate_hierarchical_reduction()
self._validate_topic_num(topic_num, reduced)
word_score_dict = dict(zip(self.topic_words_reduced[topic_num],
softmax(self.topic_word_scores_reduced[topic_num])))
else:
self._validate_topic_num(topic_num, reduced)
word_score_dict = dict(zip(self.topic_words[topic_num],
softmax(self.topic_word_scores[topic_num])))
plt.figure(figsize=(16, 4),
dpi=200)
plt.axis("off")
plt.imshow(
WordCloud(width=1600,
height=400,
background_color=background_color,
font_path='B Zar.ttf').generate_from_frequencies(word_score_dict))
plt.title("Topic " + str(topic_num), loc='left', fontsize=25, pad=20)
| 40.419794 | 120 | 0.613357 |
import logging
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import strip_tags
import umap
import hdbscan
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.cluster import dbscan
import tempfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.special import softmax
try:
import hnswlib
_HAVE_HNSWLIB = True
except ImportError:
_HAVE_HNSWLIB = False
try:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
_HAVE_TENSORFLOW = True
except ImportError:
_HAVE_TENSORFLOW = False
try:
from sentence_transformers import SentenceTransformer
_HAVE_TORCH = True
except ImportError:
_HAVE_TORCH = False
# Module-level logger: a single stream handler with a timestamped format is
# attached once at import time; Top2Vec.__init__ switches the level between
# DEBUG and WARNING depending on the verbose flag.
logger = logging.getLogger('top2vec')
logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
def default_tokenizer(doc):
    """Tokenize a document with gensim's simple_preprocess after stripping
    HTML tags; deacc=True also removes accent marks."""
    return simple_preprocess(strip_tags(doc), deacc=True)
class Top2Vec:
    """Topic modeling via joint document/word embedding.

    Documents and words are embedded in a shared semantic space (doc2vec, a
    Universal Sentence Encoder variant, or distiluse sentence-transformers),
    document vectors are reduced with UMAP and clustered with HDBSCAN, and
    each dense cluster's centroid becomes a topic vector.
    """
    def __init__(self,
                 documents,
                 min_count=50,
                 embedding_model='doc2vec',
                 embedding_model_path=None,
                 speed='learn',
                 use_corpus_file=False,
                 document_ids=None,
                 keep_documents=True,
                 workers=None,
                 tokenizer=None,
                 use_embedding_model_tokenizer=False,
                 umap_args=None,
                 hdbscan_args=None,
                 verbose=True
                 ):
        """Train a Top2Vec model on *documents*.

        documents: list/ndarray of str to model.
        min_count: words occurring fewer times are dropped from the vocabulary.
        embedding_model: 'doc2vec', 'universal-sentence-encoder',
            'universal-sentence-encoder-multilingual', or
            'distiluse-base-multilingual-cased'.
        embedding_model_path: optional local path for a pre-downloaded model.
        speed: doc2vec training preset ('fast-learn', 'learn', 'deep-learn',
            'test-learn') controlling hs/negative/epochs.
        use_corpus_file: train doc2vec from a temp corpus file instead of an
            in-memory TaggedDocument list.
        document_ids: optional unique str or int ids, one per document.
        keep_documents: store the raw documents on the model when True.
        workers: optional worker-thread count passed to doc2vec.
        tokenizer: callable doc -> tokens; defaults to default_tokenizer.
        use_embedding_model_tokenizer: let the embedding model tokenize raw
            documents itself (non-doc2vec models only).
        umap_args / hdbscan_args: optional overrides for the dimension
            reduction and clustering steps.
        verbose: emit DEBUG-level progress logging when True.
        """
        # configure logging verbosity
        if verbose:
            logger.setLevel(logging.DEBUG)
            self.verbose = True
        else:
            logger.setLevel(logging.WARNING)
            self.verbose = False
        if tokenizer is None:
            tokenizer = default_tokenizer
        # validate documents: must be a list/array of plain strings
        if not (isinstance(documents, list) or isinstance(documents, np.ndarray)):
            raise ValueError("Documents need to be a list of strings")
        if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
            raise ValueError("Documents need to be a list of strings")
        if keep_documents:
            self.documents = np.array(documents, dtype="object")
        else:
            self.documents = None
        # validate document ids: unique, same length as documents, all str or all int
        if document_ids is not None:
            if not (isinstance(document_ids, list) or isinstance(document_ids, np.ndarray)):
                raise ValueError("Documents ids need to be a list of str or int")
            if len(documents) != len(document_ids):
                raise ValueError("Document ids need to match number of documents")
            elif len(document_ids) != len(set(document_ids)):
                raise ValueError("Document ids need to be unique")
            if all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
                self.doc_id_type = np.str_
            elif all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
                self.doc_id_type = np.int_
            else:
                raise ValueError("Document ids need to be str or int")
            self.document_ids_provided = True
            self.document_ids = np.array(document_ids)
            self.doc_id2index = dict(zip(document_ids, list(range(0, len(document_ids)))))
        else:
            # no ids supplied: fall back to positional integer ids
            self.document_ids_provided = False
            self.document_ids = np.array(range(0, len(documents)))
            self.doc_id2index = dict(zip(self.document_ids, list(range(0, len(self.document_ids)))))
            self.doc_id_type = np.int_
        acceptable_embedding_models = ["universal-sentence-encoder-multilingual",
                                       "universal-sentence-encoder",
                                       "distiluse-base-multilingual-cased"]
        self.embedding_model_path = embedding_model_path
        if embedding_model == 'doc2vec':
            # map the speed preset onto gensim hyper-parameters
            if speed == "fast-learn":
                hs = 0
                negative = 5
                epochs = 40
            elif speed == "learn":
                hs = 1
                negative = 0
                epochs = 40
            elif speed == "deep-learn":
                hs = 1
                negative = 0
                epochs = 400
            elif speed == "test-learn":
                hs = 0
                negative = 5
                epochs = 1
            else:
                raise ValueError("speed parameter needs to be one of: fast-learn, learn or deep-learn")
            if workers is None:
                pass
            elif isinstance(workers, int):
                pass
            else:
                raise ValueError("workers needs to be an int")
            doc2vec_args = {"vector_size": 300,
                            "min_count": min_count,
                            "window": 15,
                            "sample": 1e-5,
                            "negative": negative,
                            "hs": hs,
                            "epochs": epochs,
                            "dm": 0,
                            "dbow_words": 1}
            if workers is not None:
                doc2vec_args["workers"] = workers
            logger.info('Pre-processing documents for training')
            if use_corpus_file:
                # write one pre-tokenized document per line to a temp file
                processed = [' '.join(tokenizer(doc)) for doc in documents]
                lines = "\n".join(processed)
                temp = tempfile.NamedTemporaryFile(mode='w+t')
                temp.write(lines)
                doc2vec_args["corpus_file"] = temp.name
            else:
                train_corpus = [TaggedDocument(tokenizer(doc), [i]) for i, doc in enumerate(documents)]
                doc2vec_args["documents"] = train_corpus
            logger.info('Creating joint document/word embedding')
            self.embedding_model = 'doc2vec'
            self.model = Doc2Vec(**doc2vec_args)
            if use_corpus_file:
                # closing the NamedTemporaryFile deletes the corpus file
                temp.close()
        elif embedding_model in acceptable_embedding_models:
            self.embed = None
            self.embedding_model = embedding_model
            self._check_import_status()
            logger.info('Pre-processing documents for training')
            tokenized_corpus = [tokenizer(doc) for doc in documents]
            # identity hooks so CountVectorizer consumes the already-tokenized corpus
            def return_doc(doc):
                return doc
            # build vocabulary of words occurring more than min_count times
            vectorizer = CountVectorizer(tokenizer=return_doc, preprocessor=return_doc)
            doc_word_counts = vectorizer.fit_transform(tokenized_corpus)
            words = vectorizer.get_feature_names()
            word_counts = np.array(np.sum(doc_word_counts, axis=0).tolist()[0])
            vocab_inds = np.where(word_counts > min_count)[0]
            if len(vocab_inds) == 0:
                raise ValueError(f"A min_count of {min_count} results in "
                                 f"all words being ignored, choose a lower value.")
            self.vocab = [words[ind] for ind in vocab_inds]
            self._check_model_status()
            logger.info('Creating joint document/word embedding')
            self.word_indexes = dict(zip(self.vocab, range(len(self.vocab))))
            self.word_vectors = self._l2_normalize(np.array(self.embed(self.vocab)))
            if use_embedding_model_tokenizer:
                self.document_vectors = self._embed_documents(documents)
            else:
                # embed the whitespace-joined tokens for consistency with the vocab
                train_corpus = [' '.join(tokens) for tokens in tokenized_corpus]
                self.document_vectors = self._embed_documents(train_corpus)
        else:
            raise ValueError(f"{embedding_model} is an invalid embedding model.")
        # reduce document vectors to 5 dimensions for density clustering
        logger.info('Creating lower dimension embedding of documents')
        if umap_args is None:
            umap_args = {'n_neighbors': 15,
                         'n_components': 5,
                         'metric': 'cosine'}
        umap_model = umap.UMAP(**umap_args).fit(self._get_document_vectors(norm=False))
        # find dense document clusters; HDBSCAN labels noise as -1
        logger.info('Finding dense areas of documents')
        if hdbscan_args is None:
            hdbscan_args = {'min_cluster_size': 15,
                            'metric': 'euclidean',
                            'cluster_selection_method': 'eom'}
        cluster = hdbscan.HDBSCAN(**hdbscan_args).fit(umap_model.embedding_)
        # derive topic vectors from cluster centroids, merge near-duplicates,
        # attach top words and assign every document to its nearest topic
        logger.info('Finding topics')
        self._create_topic_vectors(cluster.labels_)
        self._deduplicate_topics()
        self.topic_words, self.topic_word_scores = self._find_topic_words_and_scores(topic_vectors=self.topic_vectors)
        self.doc_top, self.doc_dist = self._calculate_documents_topic(self.topic_vectors,
                                                                      self._get_document_vectors())
        self.topic_sizes = self._calculate_topic_sizes(hierarchy=False)
        self._reorder_topics(hierarchy=False)
        # state for hierarchical topic reduction (populated on demand)
        self.topic_vectors_reduced = None
        self.doc_top_reduced = None
        self.doc_dist_reduced = None
        self.topic_sizes_reduced = None
        self.topic_words_reduced = None
        self.topic_word_scores_reduced = None
        self.hierarchy = None
        # state for optional hnswlib document/word indexes (populated on demand)
        self.document_index = None
        self.serialized_document_index = None
        self.documents_indexed = False
        self.index_id2doc_id = None
        self.doc_id2index_id = None
        self.word_index = None
        self.serialized_word_index = None
        self.words_indexed = False
    def save(self, file):
        """Persist the model to *file* with joblib.

        The embed callable (for non-doc2vec models) is dropped before dumping
        and re-loaded lazily by _check_model_status. Live hnswlib indexes
        cannot be pickled directly, so each is serialized to bytes via a
        temporary file, the live index is detached for the dump, and then
        restored on this instance afterwards.
        """
        document_index_temp = None
        word_index_temp = None
        # the USE/transformer embed callable is not picklable; drop it
        if self.embedding_model != "doc2vec":
            self.embed = None
        if self.documents_indexed:
            # round-trip the index through a temp file to get raw bytes
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            self.document_index.save_index(temp.name)
            self.serialized_document_index = temp.read()
            temp.close()
            document_index_temp = self.document_index
            self.document_index = None
        if self.words_indexed:
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            self.word_index.save_index(temp.name)
            self.serialized_word_index = temp.read()
            temp.close()
            word_index_temp = self.word_index
            self.word_index = None
        dump(self, file)
        # re-attach the live indexes so this instance keeps working after save
        self.document_index = document_index_temp
        self.word_index = word_index_temp
    @classmethod
    def load(cls, file):
        """Load a model saved with save().

        Rebuilds any hnswlib document/word indexes from their serialized
        bytes (written to a temp file, then loaded), and clears the
        serialized copies. Raises ImportError if an index was saved but
        hnswlib is not installed.
        """
        top2vec_model = load(file)
        if top2vec_model.documents_indexed:
            if not _HAVE_HNSWLIB:
                raise ImportError(f"Cannot load document index.\n\n"
                                  "Try: pip install top2vec[indexing]\n\n"
                                  "Alternatively try: pip install hnswlib")
            # write serialized bytes to a temp file so hnswlib can load them
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            temp.write(top2vec_model.serialized_document_index)
            if top2vec_model.embedding_model == 'doc2vec':
                document_vectors = top2vec_model.model.docvecs.vectors_docs
            else:
                document_vectors = top2vec_model.document_vectors
            top2vec_model.document_index = hnswlib.Index(space='ip',
                                                         dim=document_vectors.shape[1])
            top2vec_model.document_index.load_index(temp.name, max_elements=document_vectors.shape[0])
            temp.close()
            top2vec_model.serialized_document_index = None
        if top2vec_model.words_indexed:
            if not _HAVE_HNSWLIB:
                raise ImportError(f"Cannot load word index.\n\n"
                                  "Try: pip install top2vec[indexing]\n\n"
                                  "Alternatively try: pip install hnswlib")
            temp = tempfile.NamedTemporaryFile(mode='w+b')
            temp.write(top2vec_model.serialized_word_index)
            if top2vec_model.embedding_model == 'doc2vec':
                word_vectors = top2vec_model.model.wv.vectors
            else:
                word_vectors = top2vec_model.word_vectors
            top2vec_model.word_index = hnswlib.Index(space='ip',
                                                     dim=word_vectors.shape[1])
            top2vec_model.word_index.load_index(temp.name, max_elements=word_vectors.shape[0])
            temp.close()
            top2vec_model.serialized_word_index = None
        return top2vec_model
@staticmethod
def _l2_normalize(vectors):
if vectors.ndim == 2:
return normalize(vectors)
else:
return normalize(vectors.reshape(1, -1))[0]
def _embed_documents(self, train_corpus):
self._check_import_status()
self._check_model_status()
batch_size = 500
document_vectors = []
current = 0
batches = int(len(train_corpus) / batch_size)
extra = len(train_corpus) % batch_size
for ind in range(0, batches):
document_vectors.append(self.embed(train_corpus[current:current + batch_size]))
current += batch_size
if extra > 0:
document_vectors.append(self.embed(train_corpus[current:current + extra]))
document_vectors = self._l2_normalize(np.array(np.vstack(document_vectors)))
return document_vectors
def _embed_query(self, query):
self._check_import_status()
self._check_model_status()
return self._l2_normalize(np.array(self.embed([query])[0]))
def _set_document_vectors(self, document_vectors):
if self.embedding_model == 'doc2vec':
self.model.docvecs.vectors_docs = document_vectors
else:
self.document_vectors = document_vectors
def _get_document_vectors(self, norm=True):
if self.embedding_model == 'doc2vec':
if norm:
self.model.docvecs.init_sims()
return self.model.docvecs.vectors_docs_norm
else:
return self.model.docvecs.vectors_docs
else:
return self.document_vectors
def _index2word(self, index):
if self.embedding_model == 'doc2vec':
return self.model.wv.index2word[index]
else:
return self.vocab[index]
def _get_word_vectors(self):
if self.embedding_model == 'doc2vec':
self.model.wv.init_sims()
return self.model.wv.vectors_norm
else:
return self.word_vectors
    def _create_topic_vectors(self, cluster_labels):
        """Build topic vectors from HDBSCAN cluster labels.

        Each topic vector is the L2-normalized mean of the (un-normalized)
        document vectors sharing a cluster label; label -1 (noise) is excluded.
        """
        unique_labels = set(cluster_labels)
        if -1 in unique_labels:
            unique_labels.remove(-1)
        self.topic_vectors = self._l2_normalize(
            np.vstack([self._get_document_vectors(norm=False)[np.where(cluster_labels == label)[0]]
                      .mean(axis=0) for label in unique_labels]))
    def _deduplicate_topics(self):
        """Merge near-duplicate topic vectors.

        Runs DBSCAN (cosine distance, eps=0.1, min_samples=2) over the topic
        vectors. Label -1 marks unique topics, which are kept as-is; every
        other label is a cluster of near-duplicates that is replaced by the
        normalized mean of its members.
        """
        core_samples, labels = dbscan(X=self.topic_vectors,
                                      eps=0.1,
                                      min_samples=2,
                                      metric="cosine")
        duplicate_clusters = set(labels)
        # rebuild only if at least one duplicate cluster exists
        if len(duplicate_clusters) > 1 or -1 not in duplicate_clusters:
            # start with the topics DBSCAN left unclustered (unique ones)
            unique_topics = self.topic_vectors[np.where(labels == -1)[0]]
            if -1 in duplicate_clusters:
                duplicate_clusters.remove(-1)
            # collapse each duplicate cluster to its normalized mean vector
            for unique_label in duplicate_clusters:
                unique_topics = np.vstack(
                    [unique_topics, self._l2_normalize(self.topic_vectors[np.where(labels == unique_label)[0]]
                                                       .mean(axis=0))])
            self.topic_vectors = unique_topics
def _calculate_topic_sizes(self, hierarchy=False):
if hierarchy:
topic_sizes = pd.Series(self.doc_top_reduced).value_counts()
else:
topic_sizes = pd.Series(self.doc_top).value_counts()
return topic_sizes
    def _reorder_topics(self, hierarchy=False):
        """Renumber topics by descending size.

        topic_sizes(.index) currently holds old topic numbers ordered by
        size; reorder the per-topic arrays with that index, remap each
        document's topic assignment through old->new, then reset the size
        index so topic numbers are again 0..n-1.
        """
        if hierarchy:
            self.topic_vectors_reduced = self.topic_vectors_reduced[self.topic_sizes_reduced.index]
            self.topic_words_reduced = self.topic_words_reduced[self.topic_sizes_reduced.index]
            self.topic_word_scores_reduced = self.topic_word_scores_reduced[self.topic_sizes_reduced.index]
            old2new = dict(zip(self.topic_sizes_reduced.index, range(self.topic_sizes_reduced.index.shape[0])))
            self.doc_top_reduced = np.array([old2new[i] for i in self.doc_top_reduced])
            # hierarchy maps reduced topics to their member original topics
            self.hierarchy = [self.hierarchy[i] for i in self.topic_sizes_reduced.index]
            self.topic_sizes_reduced.reset_index(drop=True, inplace=True)
        else:
            self.topic_vectors = self.topic_vectors[self.topic_sizes.index]
            self.topic_words = self.topic_words[self.topic_sizes.index]
            self.topic_word_scores = self.topic_word_scores[self.topic_sizes.index]
            old2new = dict(zip(self.topic_sizes.index, range(self.topic_sizes.index.shape[0])))
            self.doc_top = np.array([old2new[i] for i in self.doc_top])
            self.topic_sizes.reset_index(drop=True, inplace=True)
    @staticmethod
    def _calculate_documents_topic(topic_vectors, document_vectors, dist=True, num_topics=None):
        """Assign each document to its nearest topic(s) by inner product.

        With num_topics=None returns the single best topic per document
        (doc_top) and, when dist is True, its similarity (doc_dist). With
        num_topics set, each entry is instead the top num_topics topics /
        similarities in descending order. Documents are processed in batches
        of 10000 to bound the size of the similarity matrix.
        """
        batch_size = 10000
        doc_top = []
        if dist:
            doc_dist = []
        if document_vectors.shape[0] > batch_size:
            current = 0
            batches = int(document_vectors.shape[0] / batch_size)
            extra = document_vectors.shape[0] % batch_size
            for ind in range(0, batches):
                res = np.inner(document_vectors[current:current + batch_size], topic_vectors)
                if num_topics is None:
                    doc_top.extend(np.argmax(res, axis=1))
                    if dist:
                        doc_dist.extend(np.max(res, axis=1))
                else:
                    # top num_topics topics per document, best first
                    doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                    if dist:
                        doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
                current += batch_size
            if extra > 0:
                # remainder batch
                res = np.inner(document_vectors[current:current + extra], topic_vectors)
                if num_topics is None:
                    doc_top.extend(np.argmax(res, axis=1))
                    if dist:
                        doc_dist.extend(np.max(res, axis=1))
                else:
                    doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                    if dist:
                        doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
            if dist:
                doc_dist = np.array(doc_dist)
        else:
            # small corpus: single pass over all documents
            res = np.inner(document_vectors, topic_vectors)
            if num_topics is None:
                doc_top = np.argmax(res, axis=1)
                if dist:
                    doc_dist = np.max(res, axis=1)
            else:
                doc_top.extend(np.flip(np.argsort(res), axis=1)[:, :num_topics])
                if dist:
                    doc_dist.extend(np.flip(np.sort(res), axis=1)[:, :num_topics])
        if num_topics is not None:
            # the num_topics paths accumulated python lists; convert to arrays
            doc_top = np.array(doc_top)
            if dist:
                doc_dist = np.array(doc_dist)
        if dist:
            return doc_top, doc_dist
        else:
            return doc_top
def _find_topic_words_and_scores(self, topic_vectors):
topic_words = []
topic_word_scores = []
res = np.inner(topic_vectors, self._get_word_vectors())
top_words = np.flip(np.argsort(res, axis=1), axis=1)
top_scores = np.flip(np.sort(res, axis=1), axis=1)
for words, scores in zip(top_words, top_scores):
topic_words.append([self._index2word(i) for i in words[0:50]])
topic_word_scores.append(scores[0:50])
topic_words = np.array(topic_words)
topic_word_scores = np.array(topic_word_scores)
return topic_words, topic_word_scores
    def _assign_documents_to_topic(self, document_vectors, hierarchy=False):
        """Assign newly added document vectors to their nearest topic and
        update the per-topic counts, then re-sort/renumber topics by size."""
        if hierarchy:
            doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors_reduced,
                                                                        document_vectors,
                                                                        dist=True)
            self.doc_top_reduced = np.append(self.doc_top_reduced, doc_top_new)
            self.doc_dist_reduced = np.append(self.doc_dist_reduced, doc_dist_new)
            # bump the size of each topic that received new documents
            topic_sizes_new = pd.Series(doc_top_new).value_counts()
            for top in topic_sizes_new.index.tolist():
                self.topic_sizes_reduced[top] += topic_sizes_new[top]
            self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
            self._reorder_topics(hierarchy)
        else:
            doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors, document_vectors, dist=True)
            self.doc_top = np.append(self.doc_top, doc_top_new)
            self.doc_dist = np.append(self.doc_dist, doc_dist_new)
            topic_sizes_new = pd.Series(doc_top_new).value_counts()
            for top in topic_sizes_new.index.tolist():
                self.topic_sizes[top] += topic_sizes_new[top]
            self.topic_sizes.sort_values(ascending=False, inplace=True)
            self._reorder_topics(hierarchy)
    def _unassign_documents_from_topic(self, doc_indexes, hierarchy=False):
        """Remove documents (by internal index) from their topic assignments,
        decrement the per-topic counts, then re-sort/renumber topics by size."""
        if hierarchy:
            doc_top_remove = self.doc_top_reduced[doc_indexes]
            self.doc_top_reduced = np.delete(self.doc_top_reduced, doc_indexes, 0)
            self.doc_dist_reduced = np.delete(self.doc_dist_reduced, doc_indexes, 0)
            # shrink the size of each topic that lost documents
            topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
            for top in topic_sizes_remove.index.tolist():
                self.topic_sizes_reduced[top] -= topic_sizes_remove[top]
            self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
            self._reorder_topics(hierarchy)
        else:
            doc_top_remove = self.doc_top[doc_indexes]
            self.doc_top = np.delete(self.doc_top, doc_indexes, 0)
            self.doc_dist = np.delete(self.doc_dist, doc_indexes, 0)
            topic_sizes_remove = pd.Series(doc_top_remove).value_counts()
            for top in topic_sizes_remove.index.tolist():
                self.topic_sizes[top] -= topic_sizes_remove[top]
            self.topic_sizes.sort_values(ascending=False, inplace=True)
            self._reorder_topics(hierarchy)
    def _get_document_ids(self, doc_index):
        """Translate internal document index(es) to user-facing document ids."""
        return self.document_ids[doc_index]
def _get_document_indexes(self, doc_ids):
if self.document_ids is None:
return doc_ids
else:
return [self.doc_id2index[doc_id] for doc_id in doc_ids]
    def _words2word_vectors(self, keywords):
        """Return the word vectors for the given keywords (rows selected by
        vocabulary index)."""
        return self._get_word_vectors()[[self._word2index(word) for word in keywords]]
def _word2index(self, word):
if self.embedding_model == 'doc2vec':
return self.model.wv.vocab[word].index
else:
return self.word_indexes[word]
def _get_combined_vec(self, vecs, vecs_neg):
combined_vector = np.zeros(self._get_document_vectors().shape[1], dtype=np.float64)
for vec in vecs:
combined_vector += vec
for vec in vecs_neg:
combined_vector -= vec
combined_vector /= (len(vecs) + len(vecs_neg))
combined_vector = self._l2_normalize(combined_vector)
return combined_vector
@staticmethod
def _search_vectors_by_vector(vectors, vector, num_res):
ranks = np.inner(vectors, vector)
indexes = np.flip(np.argsort(ranks)[-num_res:])
scores = np.array([ranks[res] for res in indexes])
return indexes, scores
    @staticmethod
    def _check_hnswlib_status():
        """Raise ImportError with install hints if hnswlib is unavailable."""
        if not _HAVE_HNSWLIB:
            raise ImportError(f"Indexing is not available.\n\n"
                              "Try: pip install top2vec[indexing]\n\n"
                              "Alternatively try: pip install hnswlib")
    def _check_document_index_status(self):
        """Raise if no hnswlib document index has been built.

        NOTE(review): ImportError is an odd exception type for missing state,
        but it is kept since callers may already catch it.
        """
        if self.document_index is None:
            raise ImportError("There is no document index.\n\n"
                              "Call index_document_vectors method before setting use_index=True.")
    def _check_word_index_status(self):
        """Raise if no hnswlib word index has been built.

        NOTE(review): ImportError is an odd exception type for missing state,
        but it is kept since callers may already catch it.
        """
        if self.word_index is None:
            raise ImportError("There is no word index.\n\n"
                              "Call index_word_vectors method before setting use_index=True.")
    def _check_import_status(self):
        """Verify the optional dependencies for the chosen embedding model.

        USE variants require tensorflow (+hub/text); the distiluse model
        requires sentence-transformers (torch). Raises ImportError with
        install hints otherwise.
        """
        if self.embedding_model != 'distiluse-base-multilingual-cased':
            if not _HAVE_TENSORFLOW:
                raise ImportError(f"{self.embedding_model} is not available.\n\n"
                                  "Try: pip install top2vec[sentence_encoders]\n\n"
                                  "Alternatively try: pip install tensorflow tensorflow_hub tensorflow_text")
        else:
            if not _HAVE_TORCH:
                raise ImportError(f"{self.embedding_model} is not available.\n\n"
                                  "Try: pip install top2vec[sentence_transformers]\n\n"
                                  "Alternatively try: pip install torch sentence_transformers")
    def _check_model_status(self):
        """Lazily load the embedding callable on first use.

        When self.embed is None (fresh init or after save()), download or load
        the USE model via tensorflow-hub, or the distiluse model via
        sentence-transformers, honoring embedding_model_path when set.
        Temporarily raises the log level so the download message is visible
        even when verbose is False.
        """
        if self.embed is None:
            if self.verbose is False:
                logger.setLevel(logging.DEBUG)
            if self.embedding_model != "distiluse-base-multilingual-cased":
                if self.embedding_model_path is None:
                    logger.info(f'Downloading {self.embedding_model} model')
                    if self.embedding_model == "universal-sentence-encoder-multilingual":
                        module = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
                    else:
                        module = "https://tfhub.dev/google/universal-sentence-encoder/4"
                else:
                    logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
                    module = self.embedding_model_path
                self.embed = hub.load(module)
            else:
                if self.embedding_model_path is None:
                    logger.info(f'Downloading {self.embedding_model} model')
                    module = 'distiluse-base-multilingual-cased'
                else:
                    logger.info(f'Loading {self.embedding_model} model at {self.embedding_model_path}')
                    module = self.embedding_model_path
                model = SentenceTransformer(module)
                self.embed = model.encode
            # restore the quiet log level for non-verbose models
            if self.verbose is False:
                logger.setLevel(logging.WARNING)
@staticmethod
def _less_than_zero(num, var_name):
    """Raise ValueError when `num` is negative."""
    if num >= 0:
        return
    raise ValueError(f"{var_name} cannot be less than 0.")
def _validate_hierarchical_reduction(self):
    """Ensure hierarchical_topic_reduction() has been run on this model."""
    if self.hierarchy is not None:
        return
    raise ValueError("Hierarchical topic reduction has not been performed.")
def _validate_hierarchical_reduction_num_topics(self, num_topics):
    """Target topic count must be strictly smaller than the current count."""
    current = len(self.topic_vectors)
    if num_topics >= current:
        raise ValueError(f"Number of topics must be less than {current}.")
def _validate_num_docs(self, num_docs):
    """Ensure num_docs is non-negative and does not exceed the corpus size."""
    self._less_than_zero(num_docs, "num_docs")
    total = len(self.doc_top)
    if num_docs > total:
        raise ValueError(f"num_docs cannot exceed the number of documents: {total}.")
def _validate_num_topics(self, num_topics, reduced):
    """Ensure num_topics is within the available (reduced or original) topic count."""
    self._less_than_zero(num_topics, "num_topics")
    if reduced:
        available = len(self.topic_vectors_reduced)
        label = "reduced topics"
    else:
        available = len(self.topic_vectors)
        label = "topics"
    if num_topics > available:
        raise ValueError(f"num_topics cannot exceed the number of {label}: {available}.")
def _validate_topic_num(self, topic_num, reduced):
    """Ensure topic_num refers to an existing (reduced or original) topic."""
    self._less_than_zero(topic_num, "topic_num")
    if reduced:
        max_topic = len(self.topic_vectors_reduced) - 1
        kind = "reduced"
    else:
        max_topic = len(self.topic_vectors) - 1
        kind = "original"
    if topic_num > max_topic:
        raise ValueError(f"Invalid topic number: valid {kind} topics numbers are 0 to {max_topic}.")
def _validate_topic_search(self, topic_num, num_docs, reduced):
    """Ensure the topic contains at least num_docs documents."""
    self._less_than_zero(num_docs, "num_docs")
    sizes = self.topic_sizes_reduced if reduced else self.topic_sizes
    kind = "reduced" if reduced else "original"
    if num_docs > sizes[topic_num]:
        raise ValueError(f"Invalid number of documents: {kind} topic {topic_num}"
                         f" only has {sizes[topic_num]} documents.")
def _validate_doc_ids(self, doc_ids, doc_ids_neg):
    """Validate positive and negative document ids.

    Raises ValueError when either argument is not a list/ndarray, or when
    any id is unknown to the model.

    Bug fixes: the positional range checks previously only looked at
    `doc_ids`, so invalid ids in `doc_ids_neg` slipped through; they also
    crashed with a bare ValueError from min()/max() when both lists were
    empty. Both checks now cover `doc_ids_all` and skip the empty case.
    """
    if not (isinstance(doc_ids, list) or isinstance(doc_ids, np.ndarray)):
        raise ValueError("doc_ids must be a list of string or int.")
    if not (isinstance(doc_ids_neg, list) or isinstance(doc_ids_neg, np.ndarray)):
        raise ValueError("doc_ids_neg must be a list of string or int.")
    if isinstance(doc_ids, np.ndarray):
        doc_ids = list(doc_ids)
    if isinstance(doc_ids_neg, np.ndarray):
        doc_ids_neg = list(doc_ids_neg)
    doc_ids_all = doc_ids + doc_ids_neg
    if self.document_ids is not None:
        # User-supplied ids: every id must be a known key.
        for doc_id in doc_ids_all:
            if doc_id not in self.doc_id2index:
                raise ValueError(f"{doc_id} is not a valid document id.")
    elif doc_ids_all:
        # Auto-generated positional ids: must lie in [0, num_documents).
        if min(doc_ids_all) < 0:
            raise ValueError(f"{min(doc_ids_all)} is not a valid document id.")
        if max(doc_ids_all) > len(self.doc_top) - 1:
            raise ValueError(f"{max(doc_ids_all)} is not a valid document id.")
def _validate_keywords(self, keywords, keywords_neg):
    """Lower-case the keyword lists and check every word is in the vocabulary.

    Returns the lower-cased (keywords, keywords_neg) pair.
    """
    if not (isinstance(keywords, list) or isinstance(keywords, np.ndarray)):
        raise ValueError("keywords must be a list of strings.")
    if not (isinstance(keywords_neg, list) or isinstance(keywords_neg, np.ndarray)):
        raise ValueError("keywords_neg must be a list of strings.")
    keywords_lower = [word.lower() for word in keywords]
    keywords_neg_lower = [word.lower() for word in keywords_neg]
    # doc2vec keeps its vocabulary on the gensim model; other backends on self.
    vocab = self.model.wv.vocab if self.embedding_model == 'doc2vec' else self.vocab
    for word in keywords_lower + keywords_neg_lower:
        if word not in vocab:
            raise ValueError(f"'{word}' has not been learned by the model so it cannot be searched.")
    return keywords_lower, keywords_neg_lower
def _validate_document_ids_add_doc(self, documents, document_ids):
    """Validate ids supplied to add_documents: present, matching length,
    unique, not already in the model, and of the model's id type."""
    if document_ids is None:
        raise ValueError("Document ids need to be provided.")
    if len(documents) != len(document_ids):
        raise ValueError("Document ids need to match number of documents.")
    if len(document_ids) != len(set(document_ids)):
        raise ValueError("Document ids need to be unique.")
    if len(set(document_ids).intersection(self.document_ids)) > 0:
        raise ValueError("Some document ids already exist in model.")
    if self.doc_id_type == np.str_:
        for doc_id in document_ids:
            if not (isinstance(doc_id, str) or isinstance(doc_id, np.str_)):
                raise ValueError("Document ids need to be of type str.")
    if self.doc_id_type == np.int_:
        for doc_id in document_ids:
            if not (isinstance(doc_id, int) or isinstance(doc_id, np.int_)):
                raise ValueError("Document ids need to be of type int.")
@staticmethod
def _validate_documents(documents):
    """Raise ValueError unless every document is a (numpy or plain) string."""
    for doc in documents:
        if not (isinstance(doc, str) or isinstance(doc, np.str_)):
            raise ValueError("Documents need to be a list of strings.")
@staticmethod
def _validate_query(query):
    """Raise ValueError unless `query` is a string.

    Bug fix: the original condition
    ``not isinstance(query, str) or isinstance(query, np.str_)`` binds
    ``not`` before ``or``, so a valid ``np.str_`` query raised ValueError.
    The intended check is a negated disjunction.
    """
    if not (isinstance(query, str) or isinstance(query, np.str_)):
        raise ValueError("Query needs to be a string.")
def _validate_vector(self, vector):
    """Ensure `vector` is a numpy array of the embedding dimensionality."""
    if not isinstance(vector, np.ndarray):
        raise ValueError("Vector needs to be a numpy array.")
    expected = self._get_document_vectors().shape[1]
    if vector.shape[0] != expected:
        raise ValueError(f"Vector needs to be of {expected} dimensions.")
def index_document_vectors(self, ef_construction=200, M=64):
    """Build an hnswlib approximate-nearest-neighbour index of document vectors.

    Parameters
    ----------
    ef_construction: int (Optional, default 200)
        Size of the dynamic candidate list during construction; larger
        values build a higher-quality index more slowly.
    M: int (Optional, default 64)
        Maximum number of outgoing graph links per element; larger values
        improve recall at the cost of memory.
    """
    self._check_hnswlib_status()
    document_vectors = self._get_document_vectors()
    vec_dim = document_vectors.shape[1]
    num_vecs = document_vectors.shape[0]
    # hnswlib ids are positional; keep two-way maps to translate between
    # internal index ids and user-facing document ids.
    index_ids = list(range(0, len(self.document_ids)))
    self.index_id2doc_id = dict(zip(index_ids, self.document_ids))
    self.doc_id2index_id = dict(zip(self.document_ids, index_ids))
    # 'ip' = inner-product space; presumably equivalent to cosine here since
    # the vectors are L2-normalized elsewhere in this class — TODO confirm.
    self.document_index = hnswlib.Index(space='ip', dim=vec_dim)
    self.document_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
    self.document_index.add_items(document_vectors, index_ids)
    self.documents_indexed = True
def index_word_vectors(self, ef_construction=200, M=64):
    """Build an hnswlib approximate-nearest-neighbour index of word vectors.

    Parameters mirror index_document_vectors: ef_construction trades build
    time for index quality, M trades memory for recall.
    """
    self._check_hnswlib_status()
    word_vectors = self._get_word_vectors()
    vec_dim = word_vectors.shape[1]
    num_vecs = word_vectors.shape[0]
    # Word vectors are addressed positionally, so index ids are simply 0..n-1.
    index_ids = list(range(0, num_vecs))
    self.word_index = hnswlib.Index(space='ip', dim=vec_dim)
    self.word_index.init_index(max_elements=num_vecs, ef_construction=ef_construction, M=M)
    self.word_index.add_items(word_vectors, index_ids)
    self.words_indexed = True
def update_embedding_model_path(self, embedding_model_path):
    """Point the model at a local copy of the pre-trained embedding model."""
    self.embedding_model_path = embedding_model_path
def change_to_download_embedding_model(self):
    """Forget any local model path so the embedding model is downloaded again."""
    self.embedding_model_path = None
def get_documents_topics(self, doc_ids, reduced=False, num_topics=1):
    """Return topic number(s), score(s), topic words and word scores for
    each of the given documents.

    Returns (doc_topics, doc_dist, topic_words, topic_word_scores), each
    indexed per input document (and per topic when num_topics > 1).
    """
    if reduced:
        self._validate_hierarchical_reduction()
    self._validate_doc_ids(doc_ids, doc_ids_neg=[])
    doc_indexes = self._get_document_indexes(doc_ids)
    if num_topics == 1:
        # Fast path: the top topic per document was precomputed at training time.
        if reduced:
            doc_topics = self.doc_top_reduced[doc_indexes]
            doc_dist = self.doc_dist_reduced[doc_indexes]
            topic_words = self.topic_words_reduced[doc_topics]
            topic_word_scores = self.topic_word_scores_reduced[doc_topics]
        else:
            doc_topics = self.doc_top[doc_indexes]
            doc_dist = self.doc_dist[doc_indexes]
            topic_words = self.topic_words[doc_topics]
            topic_word_scores = self.topic_word_scores[doc_topics]
    else:
        # Slow path: rank topics per document on the fly.
        if reduced:
            topic_vectors = self.topic_vectors_reduced
        else:
            topic_vectors = self.topic_vectors
        doc_topics, doc_dist = self._calculate_documents_topic(topic_vectors,
                                                               self._get_document_vectors()[doc_indexes],
                                                               num_topics=num_topics)
        # NOTE(review): words/scores are looked up in the original
        # (non-reduced) topic arrays even when reduced=True — confirm intended.
        topic_words = np.array([self.topic_words[topics] for topics in doc_topics])
        topic_word_scores = np.array([self.topic_word_scores[topics] for topics in doc_topics])
    return doc_topics, doc_dist, topic_words, topic_word_scores
def add_documents(self, documents, doc_ids=None, tokenizer=None, use_embedding_model_tokenizer=False):
    """Embed new documents and assign them to the existing topics.

    Topics are NOT retrained: the new documents are embedded with the
    current model and slotted into the nearest existing topic(s).
    """
    if tokenizer is None:
        tokenizer = default_tokenizer
    self._validate_documents(documents)
    if self.documents is not None:
        self.documents = np.append(self.documents, documents)
    # Extend the document-id bookkeeping.
    if self.document_ids_provided is True:
        self._validate_document_ids_add_doc(documents, doc_ids)
        doc_ids_len = len(self.document_ids)
        self.document_ids = np.append(self.document_ids, doc_ids)
        self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
    elif doc_ids is None:
        # Auto-generate integer ids continuing from the current maximum.
        num_docs = len(documents)
        start_id = max(self.document_ids) + 1
        doc_ids = list(range(start_id, start_id + num_docs))
        doc_ids_len = len(self.document_ids)
        self.document_ids = np.append(self.document_ids, doc_ids)
        self.doc_id2index.update(dict(zip(doc_ids, list(range(doc_ids_len, doc_ids_len + len(doc_ids))))))
    else:
        raise ValueError("doc_ids cannot be used because they were not provided to model during training.")
    if self.embedding_model == "doc2vec":
        # Infer vectors with the trained gensim Doc2Vec model, then patch
        # gensim's internal counters so the new vectors are first-class.
        docs_processed = [tokenizer(doc) for doc in documents]
        document_vectors = np.vstack([self.model.infer_vector(doc_words=doc,
                                                              alpha=0.025,
                                                              min_alpha=0.01,
                                                              epochs=100) for doc in docs_processed])
        num_docs = len(documents)
        self.model.docvecs.count += num_docs
        self.model.docvecs.max_rawint += num_docs
        # Invalidate cached norms so init_sims() recomputes them.
        self.model.docvecs.vectors_docs_norm = None
        self._set_document_vectors(np.vstack([self._get_document_vectors(norm=False), document_vectors]))
        self.model.docvecs.init_sims()
        document_vectors = self._l2_normalize(document_vectors)
    else:
        if use_embedding_model_tokenizer:
            docs_training = documents
        else:
            docs_processed = [tokenizer(doc) for doc in documents]
            docs_training = [' '.join(doc) for doc in docs_processed]
        document_vectors = self._embed_documents(docs_training)
        self._set_document_vectors(np.vstack([self._get_document_vectors(), document_vectors]))
    # Keep the optional hnswlib index in sync with the new vectors.
    if self.documents_indexed:
        current_max = self.document_index.get_max_elements()
        updated_max = current_max + len(documents)
        self.document_index.resize_index(updated_max)
        start_index_id = max(self.index_id2doc_id.keys()) + 1
        new_index_ids = list(range(start_index_id, start_index_id + len(doc_ids)))
        self.index_id2doc_id.update(dict(zip(new_index_ids, doc_ids)))
        self.doc_id2index_id.update(dict(zip(doc_ids, new_index_ids)))
        self.document_index.add_items(document_vectors, new_index_ids)
    # Update topic membership and sizes (both hierarchies if present).
    self._assign_documents_to_topic(document_vectors, hierarchy=False)
    if self.hierarchy is not None:
        self._assign_documents_to_topic(document_vectors, hierarchy=True)
def delete_documents(self, doc_ids):
    """Remove the given documents from the model and all its bookkeeping.

    Topics are not retrained; topic sizes are updated by unassigning the
    deleted documents.

    Bug fix: the hnswlib index-id lookup used call syntax on the
    ``doc_id2index_id`` dict (``self.doc_id2index_id(doc_id)``), which
    raises TypeError at runtime; a subscript lookup is required.
    """
    self._validate_doc_ids(doc_ids, doc_ids_neg=[])
    # Remove entries from the optional hnswlib index first, while the id
    # translation maps are still intact.
    if self.documents_indexed:
        index_ids = [self.doc_id2index_id[doc_id] for doc_id in doc_ids]
        for index_id in index_ids:
            self.document_index.mark_deleted(index_id)
        for doc_id in doc_ids:
            self.doc_id2index_id.pop(doc_id)
        for index_id in index_ids:
            self.index_id2doc_id.pop(index_id)
    doc_indexes = self._get_document_indexes(doc_ids)
    if self.documents is not None:
        self.documents = np.delete(self.documents, doc_indexes, 0)
    if self.document_ids is not None:
        # Rebuild the id -> position map, since positions shift on deletion.
        for doc_id in doc_ids:
            self.doc_id2index.pop(doc_id)
        keys = list(self.doc_id2index.keys())
        self.document_ids = np.array(keys)
        values = list(range(0, len(self.doc_id2index.values())))
        self.doc_id2index = dict(zip(keys, values))
    self._set_document_vectors(np.delete(self._get_document_vectors(norm=False), doc_indexes, 0))
    if self.embedding_model == 'doc2vec':
        # Patch gensim's internal counters to match the shrunken vector store.
        num_docs = len(doc_indexes)
        self.model.docvecs.count -= num_docs
        self.model.docvecs.max_rawint -= num_docs
        self.model.docvecs.vectors_docs_norm = None
        self.model.docvecs.init_sims()
    self._unassign_documents_from_topic(doc_indexes, hierarchy=False)
    if self.hierarchy is not None:
        self._unassign_documents_from_topic(doc_indexes, hierarchy=True)
def get_num_topics(self, reduced=False):
    """Return how many topics the model has (reduced hierarchy if requested)."""
    if not reduced:
        return len(self.topic_vectors)
    self._validate_hierarchical_reduction()
    return len(self.topic_vectors_reduced)
def get_topic_sizes(self, reduced=False):
    """Return (topic_sizes, topic_nums) as numpy arrays."""
    if reduced:
        self._validate_hierarchical_reduction()
        sizes = self.topic_sizes_reduced
    else:
        sizes = self.topic_sizes
    return np.array(sizes.values), np.array(sizes.index)
def get_topics(self, num_topics=None, reduced=False):
    """Return (topic_words, topic_word_scores, topic_nums) for the first
    num_topics topics (all topics when num_topics is None)."""
    if reduced:
        self._validate_hierarchical_reduction()
        words = self.topic_words_reduced
        scores = self.topic_word_scores_reduced
        total = len(self.topic_vectors_reduced)
    else:
        words = self.topic_words
        scores = self.topic_word_scores
        total = len(self.topic_vectors)
    if num_topics is None:
        num_topics = total
    else:
        self._validate_num_topics(num_topics, reduced)
    return words[0:num_topics], scores[0:num_topics], np.array(range(0, num_topics))
def get_topic_hierarchy(self):
    """Return the list mapping each reduced topic to its original topic numbers."""
    self._validate_hierarchical_reduction()
    return self.hierarchy
def hierarchical_topic_reduction(self, num_topics):
    """Agglomeratively merge topics until only `num_topics` remain.

    Repeatedly merges the smallest topic into its most similar other topic,
    recording which original topics make up each merged topic.

    Returns the hierarchy: one list of original topic numbers per reduced
    topic. Also populates all *_reduced attributes on the model.
    """
    self._validate_hierarchical_reduction_num_topics(num_topics)
    num_topics_current = self.topic_vectors.shape[0]
    top_vecs = self.topic_vectors
    top_sizes = [self.topic_sizes[i] for i in range(0, len(self.topic_sizes))]
    hierarchy = [[i] for i in range(self.topic_vectors.shape[0])]
    count = 0
    # Exact size recounts are expensive for large corpora, so only do them
    # every `interval` merges and approximate in between.
    interval = max(int(self._get_document_vectors().shape[0] / 50000), 1)
    while num_topics_current > num_topics:
        # Pick the smallest topic and its most similar other topic.
        smallest = np.argmin(top_sizes)
        res = np.inner(top_vecs[smallest], top_vecs)
        sims = np.flip(np.argsort(res))
        most_sim = sims[1]
        if most_sim == smallest:
            most_sim = sims[0]
        # Merge: size-weighted average of the two vectors, re-normalized.
        top_vec_smallest = top_vecs[smallest]
        smallest_size = top_sizes[smallest]
        top_vec_most_sim = top_vecs[most_sim]
        most_sim_size = top_sizes[most_sim]
        combined_vec = self._l2_normalize(((top_vec_smallest * smallest_size) +
                                           (top_vec_most_sim * most_sim_size)) / (smallest_size + most_sim_size))
        # Drop the two merged topics and append the combined one at the end.
        ix_keep = list(range(len(top_vecs)))
        ix_keep.remove(smallest)
        ix_keep.remove(most_sim)
        top_vecs = top_vecs[ix_keep]
        top_vecs = np.vstack([top_vecs, combined_vec])
        num_topics_current = top_vecs.shape[0]
        if count % interval == 0:
            # Exact recount: reassign every document to its nearest topic.
            doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
                                                      document_vectors=self._get_document_vectors(),
                                                      dist=False)
            topic_sizes = pd.Series(doc_top).value_counts()
            top_sizes = [topic_sizes[i] for i in range(0, len(topic_sizes))]
        else:
            # Approximate recount: just add the two merged sizes. Pop the
            # larger index second, adjusting for the shift caused by the
            # first pop.
            smallest_size = top_sizes.pop(smallest)
            if most_sim < smallest:
                most_sim_size = top_sizes.pop(most_sim)
            else:
                most_sim_size = top_sizes.pop(most_sim - 1)
            combined_size = smallest_size + most_sim_size
            top_sizes.append(combined_size)
        count += 1
        # Track which original topics were folded into the merged topic.
        smallest_inds = hierarchy.pop(smallest)
        if most_sim < smallest:
            most_sim_inds = hierarchy.pop(most_sim)
        else:
            most_sim_inds = hierarchy.pop(most_sim - 1)
        combined_inds = smallest_inds + most_sim_inds
        hierarchy.append(combined_inds)
    # Final pass: recompute reduced topic vectors as the mean of each
    # topic's member documents, then derive all reduced attributes.
    doc_top = self._calculate_documents_topic(topic_vectors=top_vecs,
                                              document_vectors=self._get_document_vectors(),
                                              dist=False)
    self.topic_vectors_reduced = self._l2_normalize(np.vstack([self._get_document_vectors()
                                                               [np.where(doc_top == label)[0]]
                                                               .mean(axis=0) for label in set(doc_top)]))
    self.hierarchy = hierarchy
    self.doc_top_reduced, self.doc_dist_reduced = self._calculate_documents_topic(self.topic_vectors_reduced,
                                                                                  self._get_document_vectors())
    self.topic_words_reduced, self.topic_word_scores_reduced = self._find_topic_words_and_scores(
        topic_vectors=self.topic_vectors_reduced)
    self.topic_sizes_reduced = self._calculate_topic_sizes(hierarchy=True)
    self._reorder_topics(hierarchy=True)
    return self.hierarchy
def query_documents(self, query, num_docs, return_documents=True, use_index=False, ef=None, tokenizer=None):
    """Find the documents most similar to a free-text query.

    The query is embedded with the model's backend (or inferred with
    gensim for doc2vec), then delegated to search_documents_by_vector.
    """
    self._validate_query(query)
    self._validate_num_docs(num_docs)
    if self.embedding_model != "doc2vec":
        query_vec = self._embed_query(query)
    else:
        if tokenizer is None:
            tokenizer = default_tokenizer
        tokenized_query = tokenizer(query)
        # Inference hyper-parameters mirror those used in add_documents.
        query_vec = self.model.infer_vector(doc_words=tokenized_query,
                                            alpha=0.025,
                                            min_alpha=0.01,
                                            epochs=100)
    return self.search_documents_by_vector(query_vec, num_docs, return_documents=return_documents,
                                           use_index=use_index, ef=ef)
def query_topics(self, query, num_topics, reduced=False, tokenizer=None):
    """Find the topics most similar to a free-text query.

    The query is embedded the same way as in query_documents, then
    delegated to search_topics_by_vector.
    """
    self._validate_query(query)
    if self.embedding_model != "doc2vec":
        query_vec = self._embed_query(query)
    else:
        if tokenizer is None:
            tokenizer = default_tokenizer
        tokenized_query = tokenizer(query)
        # Inference hyper-parameters mirror those used in add_documents.
        query_vec = self.model.infer_vector(doc_words=tokenized_query,
                                            alpha=0.025,
                                            min_alpha=0.01,
                                            epochs=100)
    return self.search_topics_by_vector(query_vec, num_topics=num_topics, reduced=reduced)
def search_documents_by_vector(self, vector, num_docs, return_documents=True, use_index=False, ef=None):
    """Return the documents whose vectors are most similar to `vector`.

    When use_index is True the pre-built hnswlib index is queried
    (approximate, fast); otherwise an exact brute-force scan is used.
    """
    self._validate_vector(vector)
    self._validate_num_docs(num_docs)
    vector = self._l2_normalize(vector)
    if use_index:
        self._check_document_index_status()
        # ef trades recall for speed and must be >= k, so default it to num_docs.
        if ef is not None:
            self.document_index.set_ef(ef)
        else:
            self.document_index.set_ef(num_docs)
        index_ids, doc_scores = self.document_index.knn_query(vector, k=num_docs)
        index_ids = index_ids[0]
        doc_ids = np.array([self.index_id2doc_id[index_id] for index_id in index_ids])
        doc_scores = doc_scores[0]
        # hnswlib returns inner-product *distances*; convert to similarities.
        doc_scores = np.array([1 - score for score in doc_scores])
        doc_indexes = self._get_document_indexes(doc_ids)
    else:
        doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
                                                                 vector, num_docs)
        doc_ids = self._get_document_ids(doc_indexes)
    if self.documents is not None and return_documents:
        documents = self.documents[doc_indexes]
        return documents, doc_scores, doc_ids
    else:
        return doc_scores, doc_ids
def search_words_by_vector(self, vector, num_words, use_index=False, ef=None):
    """Return the vocabulary words most similar to `vector`, with scores."""
    self._validate_vector(vector)
    vector = self._l2_normalize(vector)
    if use_index:
        self._check_word_index_status()
        # ef trades recall for speed and must be >= k, so default it to num_words.
        if ef is not None:
            self.word_index.set_ef(ef)
        else:
            self.word_index.set_ef(num_words)
        word_indexes, word_scores = self.word_index.knn_query(vector, k=num_words)
        word_indexes = word_indexes[0]
        word_scores = word_scores[0]
        # hnswlib returns inner-product *distances*; convert to similarities.
        word_scores = np.array([1 - score for score in word_scores])
    else:
        word_indexes, word_scores = self._search_vectors_by_vector(self._get_word_vectors(),
                                                                   vector, num_words)
    words = np.array([self._index2word(index) for index in word_indexes])
    return words, word_scores
def search_topics_by_vector(self, vector, num_topics, reduced=False):
    """Return the topics most similar to `vector`.

    Returns (topic_words, word_scores, topic_scores, topic_nums).
    """
    self._validate_vector(vector)
    self._validate_num_topics(num_topics, reduced)
    vector = self._l2_normalize(vector)
    if reduced:
        self._validate_hierarchical_reduction()
        vectors = self.topic_vectors_reduced
        all_words = self.topic_words_reduced
        all_scores = self.topic_word_scores_reduced
    else:
        vectors = self.topic_vectors
        all_words = self.topic_words
        all_scores = self.topic_word_scores
    topic_nums, topic_scores = self._search_vectors_by_vector(vectors, vector, num_topics)
    topic_words = [all_words[topic] for topic in topic_nums]
    word_scores = [all_scores[topic] for topic in topic_nums]
    return topic_words, word_scores, topic_scores, topic_nums
def search_documents_by_topic(self, topic_num, num_docs, return_documents=True, reduced=False):
    """Return the num_docs documents most representative of a topic,
    ordered by their similarity to the topic vector."""
    if reduced:
        self._validate_hierarchical_reduction()
    self._validate_topic_num(topic_num, reduced)
    self._validate_topic_search(topic_num, num_docs, reduced)
    topic_per_doc = self.doc_top_reduced if reduced else self.doc_top
    dist_per_doc = self.doc_dist_reduced if reduced else self.doc_dist
    members = np.where(topic_per_doc == topic_num)[0]
    # Order topic members by decreasing similarity to the topic vector.
    order = np.flip(np.argsort(dist_per_doc[members]))
    doc_indexes = members[order][0:num_docs]
    doc_scores = dist_per_doc[doc_indexes]
    doc_ids = self._get_document_ids(doc_indexes)
    if self.documents is not None and return_documents:
        return self.documents[doc_indexes], doc_scores, doc_ids
    return doc_scores, doc_ids
def search_documents_by_keywords(self, keywords, num_docs, keywords_neg=None, return_documents=True,
                                 use_index=False, ef=None):
    """Find documents most similar to `keywords` and least similar to
    `keywords_neg`."""
    if keywords_neg is None:
        keywords_neg = []
    self._validate_num_docs(num_docs)
    keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
    word_vecs = self._words2word_vectors(keywords)
    neg_word_vecs = self._words2word_vectors(keywords_neg)
    if use_index:
        self._check_document_index_status()
        combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
        return self.search_documents_by_vector(combined_vector, num_docs, return_documents=return_documents,
                                               use_index=True, ef=ef)
    if self.embedding_model == 'doc2vec':
        # gensim combines positive/negative vectors natively.
        sim_docs = self.model.docvecs.most_similar(positive=word_vecs,
                                                   negative=neg_word_vecs,
                                                   topn=num_docs)
        doc_indexes = [doc[0] for doc in sim_docs]
        doc_scores = np.array([doc[1] for doc in sim_docs])
    else:
        combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
        doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
                                                                 combined_vector, num_docs)
    doc_ids = self._get_document_ids(doc_indexes)
    if self.documents is not None and return_documents:
        documents = self.documents[doc_indexes]
        return documents, doc_scores, doc_ids
    else:
        return doc_scores, doc_ids
def similar_words(self, keywords, num_words, keywords_neg=None, use_index=False, ef=None):
    """Return words semantically closest to `keywords` (minus `keywords_neg`)."""
    if keywords_neg is None:
        keywords_neg = []
    keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
    word_vecs = self._words2word_vectors(keywords)
    neg_word_vecs = self._words2word_vectors(keywords_neg)
    combined_vector = self._get_combined_vec(word_vecs, neg_word_vecs)
    # Over-fetch so the query words themselves can be filtered out below
    # while still returning num_words results.
    num_res = min(num_words + len(keywords) + len(keywords_neg), self._get_word_vectors().shape[0])
    words, word_scores = self.search_words_by_vector(vector=combined_vector,
                                                     num_words=num_res,
                                                     use_index=use_index,
                                                     ef=ef)
    res_indexes = [index for index, word in enumerate(words)
                   if word not in list(keywords) + list(keywords_neg)][:num_words]
    words = words[res_indexes]
    word_scores = word_scores[res_indexes]
    return words, word_scores
def search_topics(self, keywords, num_topics, keywords_neg=None, reduced=False):
    """Find the topics most similar to the combined keyword vector."""
    if keywords_neg is None:
        keywords_neg = []
    keywords, keywords_neg = self._validate_keywords(keywords, keywords_neg)
    positive_vecs = self._words2word_vectors(keywords)
    negative_vecs = self._words2word_vectors(keywords_neg)
    query_vector = self._get_combined_vec(positive_vecs, negative_vecs)
    return self.search_topics_by_vector(query_vector, num_topics=num_topics, reduced=reduced)
def search_documents_by_documents(self, doc_ids, num_docs, doc_ids_neg=None, return_documents=True,
                                  use_index=False, ef=None):
    """Find documents most similar to the given documents (and least
    similar to doc_ids_neg), excluding the query documents themselves."""
    if doc_ids_neg is None:
        doc_ids_neg = []
    self._validate_num_docs(num_docs)
    self._validate_doc_ids(doc_ids, doc_ids_neg)
    doc_indexes = self._get_document_indexes(doc_ids)
    doc_indexes_neg = self._get_document_indexes(doc_ids_neg)
    if use_index:
        self._check_document_index_status()
        document_vectors = self._get_document_vectors()
        doc_vecs = [document_vectors[ind] for ind in doc_indexes]
        doc_vecs_neg = [document_vectors[ind] for ind in doc_indexes_neg]
        combined_vector = self._get_combined_vec(doc_vecs, doc_vecs_neg)
        return self.search_documents_by_vector(combined_vector, num_docs, return_documents=return_documents,
                                               use_index=True, ef=ef)
    if self.embedding_model == 'doc2vec':
        # gensim combines positive/negative documents natively.
        sim_docs = self.model.docvecs.most_similar(positive=doc_indexes,
                                                   negative=doc_indexes_neg,
                                                   topn=num_docs)
        doc_indexes = [doc[0] for doc in sim_docs]
        doc_scores = np.array([doc[1] for doc in sim_docs])
    else:
        # NOTE(review): uses self.document_vectors directly instead of
        # self._get_document_vectors() as elsewhere in this class —
        # presumably the same array; confirm.
        doc_vecs = [self.document_vectors[ind] for ind in doc_indexes]
        doc_vecs_neg = [self.document_vectors[ind] for ind in doc_indexes_neg]
        combined_vector = self._get_combined_vec(doc_vecs, doc_vecs_neg)
        # Over-fetch so the query documents can be filtered out below.
        num_res = min(num_docs + len(doc_indexes) + len(doc_indexes_neg),
                      self._get_document_vectors().shape[0])
        search_doc_indexes = list(doc_indexes) + list(doc_indexes_neg)
        doc_indexes, doc_scores = self._search_vectors_by_vector(self._get_document_vectors(),
                                                                 combined_vector, num_res)
        res_indexes = [index for index, doc_ind in enumerate(doc_indexes)
                       if doc_ind not in search_doc_indexes][:num_docs]
        doc_indexes = doc_indexes[res_indexes]
        doc_scores = doc_scores[res_indexes]
    doc_ids = self._get_document_ids(doc_indexes)
    if self.documents is not None and return_documents:
        documents = self.documents[doc_indexes]
        return documents, doc_scores, doc_ids
    else:
        return doc_scores, doc_ids
def generate_topic_wordcloud(self, topic_num, background_color="black", reduced=False):
    """Render a word cloud for a topic; word size follows softmax(word score)."""
    if reduced:
        self._validate_hierarchical_reduction()
        self._validate_topic_num(topic_num, reduced)
        word_score_dict = dict(zip(self.topic_words_reduced[topic_num],
                                   softmax(self.topic_word_scores_reduced[topic_num])))
    else:
        self._validate_topic_num(topic_num, reduced)
        word_score_dict = dict(zip(self.topic_words[topic_num],
                                   softmax(self.topic_word_scores[topic_num])))
    plt.figure(figsize=(16, 4),
               dpi=200)
    plt.axis("off")
    plt.imshow(
        WordCloud(width=1600,
                  height=400,
                  background_color=background_color,
                  # NOTE(review): hard-coded local font file — rendering
                  # fails unless 'B Zar.ttf' exists in the working directory.
                  font_path='B Zar.ttf').generate_from_frequencies(word_score_dict))
    plt.title("Topic " + str(topic_num), loc='left', fontsize=25, pad=20)
| true | true |
f7fc54d9346560e9db56dde1a2b49fc6469fe28f | 11,154 | py | Python | test/fakedata_generation.py | AlessandroLaRocca96/vision | d4195587166134c3806ae81458d08b06f5e00295 | [
"BSD-3-Clause"
] | 1 | 2019-08-22T00:50:56.000Z | 2019-08-22T00:50:56.000Z | test/fakedata_generation.py | AlessandroLaRocca96/vision | d4195587166134c3806ae81458d08b06f5e00295 | [
"BSD-3-Clause"
] | 27 | 2019-11-06T10:06:13.000Z | 2020-11-06T11:34:20.000Z | test/fakedata_generation.py | AlessandroLaRocca96/vision | d4195587166134c3806ae81458d08b06f5e00295 | [
"BSD-3-Clause"
] | null | null | null | import os
import contextlib
import tarfile
import json
import numpy as np
import PIL
import torch
from common_utils import get_tmp_dir
import pickle
import random
from itertools import cycle
from torchvision.io.video import write_video
import unittest.mock
import hashlib
from distutils import dir_util
import re
def mock_class_attribute(stack, target, new):
    """Patch `target` as a read-only property returning `new`.

    The patch is registered on `stack` (an ExitStack) so it is undone when
    the stack closes. Returns the patcher object.
    """
    patcher = unittest.mock.patch(
        target, new_callable=unittest.mock.PropertyMock, return_value=new
    )
    stack.enter_context(patcher)
    return patcher
def compute_md5(file):
    """Return the hex MD5 digest of the file's contents."""
    with open(file, "rb") as stream:
        digest = hashlib.md5(stream.read())
    return digest.hexdigest()
def make_tar(root, name, *files, compression=None):
    """Create <root>/<name>.tar[.<compression>] containing `files`.

    Each file path is interpreted relative to `root` and stored under that
    relative name. Returns (archive_name, md5_of_archive).
    """
    if compression is None:
        suffix, mode = ".tar", "w"
    else:
        suffix, mode = f".tar.{compression}", f"w:{compression}"
    name = os.path.splitext(name)[0] + suffix
    archive = os.path.join(root, name)
    with tarfile.open(archive, mode) as tar:
        for file in files:
            tar.add(os.path.join(root, file), arcname=file)
    return name, compute_md5(archive)
def clean_dir(root, *keep):
    """Delete every entry in `root` whose name does not match any regex in `keep`.

    Files are unlinked; directories are removed recursively.

    Fix: replaced ``distutils.dir_util.remove_tree`` with ``shutil.rmtree``
    — distutils is deprecated (PEP 632) and removed in Python 3.12.
    """
    import shutil  # local import: the module-level import block is outside this span

    pattern = re.compile(f"({f')|('.join(keep)})")
    for entry in os.listdir(root):
        if pattern.search(entry):
            continue
        path = os.path.join(root, entry)
        if os.path.isfile(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
@contextlib.contextmanager
def mnist_root(num_images, cls_name):
    """Create a temporary directory holding fake MNIST train/test files.

    Yields the root directory; files live under <root>/<cls_name>/raw in
    the IDX binary layout (magic, counts, dims, then raw pixel/label bytes).
    """
    def _encode(v):
        # int32 serialized then byte-reversed — converts the host's
        # (presumably little-endian) bytes to the big-endian header order.
        return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]

    def _make_image_file(filename, num_images):
        # Random uint8 pixels for num_images 28x28 images, written flat.
        img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2051))  # magic header
            f.write(_encode(num_images))
            f.write(_encode(28))
            f.write(_encode(28))
            f.write(img.numpy().tobytes())

    def _make_label_file(filename, num_images):
        # All labels are 0; only the file structure matters for the tests.
        labels = torch.zeros((num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2049))  # magic header
            f.write(_encode(num_images))
            f.write(labels.numpy().tobytes())

    with get_tmp_dir() as tmp_dir:
        raw_dir = os.path.join(tmp_dir, cls_name, "raw")
        os.makedirs(raw_dir)
        _make_image_file(os.path.join(raw_dir, "train-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "train-labels-idx1-ubyte"), num_images)
        _make_image_file(os.path.join(raw_dir, "t10k-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "t10k-labels-idx1-ubyte"), num_images)
        yield tmp_dir
@contextlib.contextmanager
def cifar_root(version):
    """Create a temporary fake CIFAR10/CIFAR100 dataset and yield its root.

    Each data file holds a single all-zero 32x32x3 image labeled 0, and
    the meta file declares one class named 'fakedata'.
    """
    def _get_version_params(version):
        # The on-disk layout and pickle dict keys differ between releases.
        if version == 'CIFAR10':
            return {
                'base_folder': 'cifar-10-batches-py',
                'train_files': ['data_batch_{}'.format(batch) for batch in range(1, 6)],
                'test_file': 'test_batch',
                'target_key': 'labels',
                'meta_file': 'batches.meta',
                'classes_key': 'label_names',
            }
        elif version == 'CIFAR100':
            return {
                'base_folder': 'cifar-100-python',
                'train_files': ['train'],
                'test_file': 'test',
                'target_key': 'fine_labels',
                'meta_file': 'meta',
                'classes_key': 'fine_label_names',
            }
        else:
            raise ValueError

    def _make_pickled_file(obj, file):
        # Protocol 2 matches the format the real CIFAR archives use.
        with open(file, 'wb') as fh:
            pickle.dump(obj, fh, 2)

    def _make_data_file(file, target_key):
        obj = {
            'data': np.zeros((1, 32 * 32 * 3), dtype=np.uint8),
            target_key: [0]
        }
        _make_pickled_file(obj, file)

    def _make_meta_file(file, classes_key):
        obj = {
            classes_key: ['fakedata'],
        }
        _make_pickled_file(obj, file)

    params = _get_version_params(version)
    with get_tmp_dir() as root:
        base_folder = os.path.join(root, params['base_folder'])
        os.mkdir(base_folder)
        for file in list(params['train_files']) + [params['test_file']]:
            _make_data_file(os.path.join(base_folder, file), params['target_key'])
        _make_meta_file(os.path.join(base_folder, params['meta_file']),
                        params['classes_key'])
        yield root
@contextlib.contextmanager
def widerface_root():
    """
    Generates a dataset with the following folder structure and returns the path root:
    <root>
    └── widerface
        ├── wider_face_split
        ├── WIDER_train
        ├── WIDER_val
        └── WIDER_test
    The dataset consist of
      1 image for each dataset split (train, val, test) and annotation files
      for each split
    """
    def _make_image(file):
        # A blank 32x32 RGB image is enough for the loader tests.
        PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(file)

    def _make_train_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_train', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_1.jpg'))

    def _make_val_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_val', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_2.jpg'))

    def _make_test_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_test', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_3.jpg'))

    def _make_annotations_archive(root):
        # Annotation lines: image path, face count, then one bbox row per
        # face — presumably x y w h plus attribute flags; confirm against
        # the WIDER FACE annotation spec.
        train_bbox_contents = '0--Parade/0_Parade_marchingband_1_1.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n'
        val_bbox_contents = '0--Parade/0_Parade_marchingband_1_2.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n'
        test_filelist_contents = '0--Parade/0_Parade_marchingband_1_3.jpg\n'
        extracted_dir = os.path.join(root, 'wider_face_split')
        os.mkdir(extracted_dir)
        # bbox training file
        bbox_file = os.path.join(extracted_dir, "wider_face_train_bbx_gt.txt")
        with open(bbox_file, "w") as txt_file:
            txt_file.write(train_bbox_contents)
        # bbox validation file
        bbox_file = os.path.join(extracted_dir, "wider_face_val_bbx_gt.txt")
        with open(bbox_file, "w") as txt_file:
            txt_file.write(val_bbox_contents)
        # test filelist file
        filelist_file = os.path.join(extracted_dir, "wider_face_test_filelist.txt")
        with open(filelist_file, "w") as txt_file:
            txt_file.write(test_filelist_contents)

    with get_tmp_dir() as root:
        root_base = os.path.join(root, "widerface")
        os.mkdir(root_base)
        _make_train_archive(root_base)
        _make_val_archive(root_base)
        _make_test_archive(root_base)
        _make_annotations_archive(root_base)
        yield root
@contextlib.contextmanager
def places365_root(split="train-standard", small=False):
    """Yield (root, data) for a fake Places365 dataset.

    Devkit and image archives for the requested *split* (and image size,
    when *small* is True) are created under a temporary *root*, and the
    Places365 class-level archive metadata is patched to match them for
    the duration of the context.  *data* carries the expected
    class_to_idx / classes / imgs values so a test can assert against
    what the dataset class should load.
    """
    VARIANTS = {
        "train-standard": "standard",
        "train-challenge": "challenge",
        "val": "standard",
    }
    # {split: file}
    DEVKITS = {
        "train-standard": "filelist_places365-standard.tar",
        "train-challenge": "filelist_places365-challenge.tar",
        "val": "filelist_places365-standard.tar",
    }
    CATEGORIES = "categories_places365.txt"
    # {split: file}
    FILE_LISTS = {
        "train-standard": "places365_train_standard.txt",
        "train-challenge": "places365_train_challenge.txt",
        "val": "places365_train_standard.txt",
    }
    # {(split, small): (archive, folder_default, folder_renamed)}
    IMAGES = {
        ("train-standard", False): ("train_large_places365standard.tar", "data_large", "data_large_standard"),
        ("train-challenge", False): ("train_large_places365challenge.tar", "data_large", "data_large_challenge"),
        ("val", False): ("val_large.tar", "val_large", "val_large"),
        ("train-standard", True): ("train_256_places365standard.tar", "data_256", "data_256_standard"),
        ("train-challenge", True): ("train_256_places365challenge.tar", "data_256", "data_256_challenge"),
        ("val", True): ("val_256.tar", "val_256", "val_256"),
    }
    # (class, idx)
    CATEGORIES_CONTENT = (("/a/airfield", 0), ("/a/apartment_building/outdoor", 8), ("/b/badlands", 30))
    # (file, idx)
    FILE_LIST_CONTENT = (
        ("Places365_val_00000001.png", 0),
        *((f"{category}/Places365_train_00000001.png", idx) for category, idx in CATEGORIES_CONTENT),
    )
    def mock_target(attr, partial="torchvision.datasets.places365.Places365"):
        # Dotted path of the class attribute to patch.
        return f"{partial}.{attr}"
    def make_txt(root, name, seq):
        # Write "<string> <idx>" lines and return (name, md5) like the
        # dataset's archive metadata expects.
        file = os.path.join(root, name)
        with open(file, "w") as fh:
            for string, idx in seq:
                fh.write(f"{string} {idx}\n")
        return name, compute_md5(file)
    def make_categories_txt(root, name):
        return make_txt(root, name, CATEGORIES_CONTENT)
    def make_file_list_txt(root, name):
        return make_txt(root, name, FILE_LIST_CONTENT)
    def make_image(file, size):
        os.makedirs(os.path.dirname(file), exist_ok=True)
        PIL.Image.fromarray(np.zeros((*size, 3), dtype=np.uint8)).save(file)
    def make_devkit_archive(stack, root, split):
        archive = DEVKITS[split]
        files = []
        meta = make_categories_txt(root, CATEGORIES)
        mock_class_attribute(stack, mock_target("_CATEGORIES_META"), meta)
        files.append(meta[0])
        meta = {split: make_file_list_txt(root, FILE_LISTS[split])}
        mock_class_attribute(stack, mock_target("_FILE_LIST_META"), meta)
        files.extend([item[0] for item in meta.values()])
        meta = {VARIANTS[split]: make_tar(root, archive, *files)}
        mock_class_attribute(stack, mock_target("_DEVKIT_META"), meta)
    def make_images_archive(stack, root, split, small):
        archive, folder_default, folder_renamed = IMAGES[(split, small)]
        image_size = (256, 256) if small else (512, random.randint(512, 1024))
        files, idcs = zip(*FILE_LIST_CONTENT)
        images = [file.lstrip("/").replace("/", os.sep) for file in files]
        for image in images:
            make_image(os.path.join(root, folder_default, image), image_size)
        meta = {(split, small): make_tar(root, archive, folder_default)}
        mock_class_attribute(stack, mock_target("_IMAGES_META"), meta)
        return [(os.path.join(root, folder_renamed, image), idx) for image, idx in zip(images, idcs)]
    with contextlib.ExitStack() as stack, get_tmp_dir() as root:
        make_devkit_archive(stack, root, split)
        class_to_idx = dict(CATEGORIES_CONTENT)
        classes = list(class_to_idx.keys())
        data = {"class_to_idx": class_to_idx, "classes": classes}
        data["imgs"] = make_images_archive(stack, root, split, small)
        # Only the .tar archives are left in root; loose files are removed.
        clean_dir(root, ".tar$")
        yield root, data
| 35.864952 | 113 | 0.633584 | import os
import contextlib
import tarfile
import json
import numpy as np
import PIL
import torch
from common_utils import get_tmp_dir
import pickle
import random
from itertools import cycle
from torchvision.io.video import write_video
import unittest.mock
import hashlib
from distutils import dir_util
import re
def mock_class_attribute(stack, target, new):
    """Patch *target* (a dotted attribute path) as a read-only property
    returning *new*.

    The patcher is activated immediately and registered on *stack* (a
    contextlib.ExitStack), so it is undone when the stack closes.
    Returns the patcher object.
    """
    patcher = unittest.mock.patch(
        target,
        new_callable=unittest.mock.PropertyMock,
        return_value=new,
    )
    stack.enter_context(patcher)
    return patcher
def compute_md5(file):
    """Return the hex MD5 digest of the given file's full contents."""
    with open(file, "rb") as handle:
        payload = handle.read()
    return hashlib.md5(payload).hexdigest()
def make_tar(root, name, *files, compression=None):
    """Create <root>/<name stem>.tar[.<compression>] containing *files*.

    Each entry in *files* is a path relative to *root* and is stored in
    the archive under that relative name.  Returns a tuple of
    (archive_file_name, md5_hexdigest_of_the_archive).
    """
    suffix = ".tar" if compression is None else f".tar.{compression}"
    mode = "w" if compression is None else f"w:{compression}"
    # Replace whatever extension *name* had with the archive suffix.
    archive_name = os.path.splitext(name)[0] + suffix
    archive_path = os.path.join(root, archive_name)
    with tarfile.open(archive_path, mode) as tar:
        for relpath in files:
            tar.add(os.path.join(root, relpath), arcname=relpath)
    return archive_name, compute_md5(archive_path)
def clean_dir(root, *keep):
    """Delete every entry of *root* whose name matches none of the
    *keep* regular expressions (searched, not fully matched)."""
    keep_pattern = re.compile("(" + ")|(".join(keep) + ")")
    for entry in os.listdir(root):
        if keep_pattern.search(entry):
            # Name matches a keep pattern: leave it alone.
            continue
        path = os.path.join(root, entry)
        if os.path.isfile(path):
            os.remove(path)
        else:
            dir_util.remove_tree(path)
@contextlib.contextmanager
def mnist_root(num_images, cls_name):
    """Yield a temporary directory laid out as a fake MNIST-style dataset.

    Creates <tmp>/<cls_name>/raw/ holding train and t10k image/label
    files in the binary layout written below, each containing
    *num_images* random 28x28 uint8 images and all-zero labels.
    """
    def _encode(v):
        # Serialize v as a 4-byte int32; [::-1] reverses the byte order
        # for the file header.  NOTE(review): assumes a little-endian
        # host so the reversal yields big-endian -- confirm.
        return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]
    def _make_image_file(filename, num_images):
        # Header: magic value 2051, image count, then the 28x28
        # dimensions, followed by the raw pixel bytes.
        img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2051))
            f.write(_encode(num_images))
            f.write(_encode(28))
            f.write(_encode(28))
            f.write(img.numpy().tobytes())
    def _make_label_file(filename, num_images):
        # Header: magic value 2049 and label count, then one zero label
        # byte per image.
        labels = torch.zeros((num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2049))
            f.write(_encode(num_images))
            f.write(labels.numpy().tobytes())
    with get_tmp_dir() as tmp_dir:
        raw_dir = os.path.join(tmp_dir, cls_name, "raw")
        os.makedirs(raw_dir)
        _make_image_file(os.path.join(raw_dir, "train-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "train-labels-idx1-ubyte"), num_images)
        _make_image_file(os.path.join(raw_dir, "t10k-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "t10k-labels-idx1-ubyte"), num_images)
        yield tmp_dir
@contextlib.contextmanager
def cifar_root(version):
    """Yield a temporary directory containing a minimal fake CIFAR dataset.

    *version* must be 'CIFAR10' or 'CIFAR100'.  The pickled batch files
    and meta file are written with a single all-zero 32x32x3 image
    labeled 0 and one fake class name.
    """
    def _get_version_params(version):
        # Per-version file names and pickle dictionary keys.
        if version == 'CIFAR10':
            return {
                'base_folder': 'cifar-10-batches-py',
                'train_files': ['data_batch_{}'.format(batch) for batch in range(1, 6)],
                'test_file': 'test_batch',
                'target_key': 'labels',
                'meta_file': 'batches.meta',
                'classes_key': 'label_names',
            }
        elif version == 'CIFAR100':
            return {
                'base_folder': 'cifar-100-python',
                'train_files': ['train'],
                'test_file': 'test',
                'target_key': 'fine_labels',
                'meta_file': 'meta',
                'classes_key': 'fine_label_names',
            }
        else:
            raise ValueError
    def _make_pickled_file(obj, file):
        # Protocol 2 pickles, as the real dataset archives use.
        with open(file, 'wb') as fh:
            pickle.dump(obj, fh, 2)
    def _make_data_file(file, target_key):
        # One flattened 32*32*3 uint8 image with target 0.
        obj = {
            'data': np.zeros((1, 32 * 32 * 3), dtype=np.uint8),
            target_key: [0]
        }
        _make_pickled_file(obj, file)
    def _make_meta_file(file, classes_key):
        obj = {
            classes_key: ['fakedata'],
        }
        _make_pickled_file(obj, file)
    params = _get_version_params(version)
    with get_tmp_dir() as root:
        base_folder = os.path.join(root, params['base_folder'])
        os.mkdir(base_folder)
        for file in list(params['train_files']) + [params['test_file']]:
            _make_data_file(os.path.join(base_folder, file), params['target_key'])
        _make_meta_file(os.path.join(base_folder, params['meta_file']),
                        params['classes_key'])
        yield root
@contextlib.contextmanager
def widerface_root():
    """Yield a temporary root laid out like the WIDER FACE dataset:

        <root>/widerface/
            WIDER_train / WIDER_val / WIDER_test  (one image each)
            wider_face_split/                     (annotation files)
    """
    def _make_image(file):
        # A single blank 32x32 RGB image.
        PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(file)
    def _make_train_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_train', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_1.jpg'))
    def _make_val_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_val', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_2.jpg'))
    def _make_test_archive(root):
        extracted_dir = os.path.join(root, 'WIDER_test', 'images', '0--Parade')
        os.makedirs(extracted_dir)
        _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_3.jpg'))
    def _make_annotations_archive(root):
        # Bounding-box annotations for train/val and the bare file list
        # for the (unannotated) test split.
        train_bbox_contents = '0--Parade/0_Parade_marchingband_1_1.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n'
        val_bbox_contents = '0--Parade/0_Parade_marchingband_1_2.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n'
        test_filelist_contents = '0--Parade/0_Parade_marchingband_1_3.jpg\n'
        extracted_dir = os.path.join(root, 'wider_face_split')
        os.mkdir(extracted_dir)
        bbox_file = os.path.join(extracted_dir, "wider_face_train_bbx_gt.txt")
        with open(bbox_file, "w") as txt_file:
            txt_file.write(train_bbox_contents)
        bbox_file = os.path.join(extracted_dir, "wider_face_val_bbx_gt.txt")
        with open(bbox_file, "w") as txt_file:
            txt_file.write(val_bbox_contents)
        filelist_file = os.path.join(extracted_dir, "wider_face_test_filelist.txt")
        with open(filelist_file, "w") as txt_file:
            txt_file.write(test_filelist_contents)
    with get_tmp_dir() as root:
        root_base = os.path.join(root, "widerface")
        os.mkdir(root_base)
        _make_train_archive(root_base)
        _make_val_archive(root_base)
        _make_test_archive(root_base)
        _make_annotations_archive(root_base)
        yield root
@contextlib.contextmanager
def places365_root(split="train-standard", small=False):
VARIANTS = {
"train-standard": "standard",
"train-challenge": "challenge",
"val": "standard",
}
DEVKITS = {
"train-standard": "filelist_places365-standard.tar",
"train-challenge": "filelist_places365-challenge.tar",
"val": "filelist_places365-standard.tar",
}
CATEGORIES = "categories_places365.txt"
FILE_LISTS = {
"train-standard": "places365_train_standard.txt",
"train-challenge": "places365_train_challenge.txt",
"val": "places365_train_standard.txt",
}
IMAGES = {
("train-standard", False): ("train_large_places365standard.tar", "data_large", "data_large_standard"),
("train-challenge", False): ("train_large_places365challenge.tar", "data_large", "data_large_challenge"),
("val", False): ("val_large.tar", "val_large", "val_large"),
("train-standard", True): ("train_256_places365standard.tar", "data_256", "data_256_standard"),
("train-challenge", True): ("train_256_places365challenge.tar", "data_256", "data_256_challenge"),
("val", True): ("val_256.tar", "val_256", "val_256"),
}
CATEGORIES_CONTENT = (("/a/airfield", 0), ("/a/apartment_building/outdoor", 8), ("/b/badlands", 30))
FILE_LIST_CONTENT = (
("Places365_val_00000001.png", 0),
*((f"{category}/Places365_train_00000001.png", idx) for category, idx in CATEGORIES_CONTENT),
)
def mock_target(attr, partial="torchvision.datasets.places365.Places365"):
return f"{partial}.{attr}"
def make_txt(root, name, seq):
file = os.path.join(root, name)
with open(file, "w") as fh:
for string, idx in seq:
fh.write(f"{string} {idx}\n")
return name, compute_md5(file)
def make_categories_txt(root, name):
return make_txt(root, name, CATEGORIES_CONTENT)
def make_file_list_txt(root, name):
return make_txt(root, name, FILE_LIST_CONTENT)
def make_image(file, size):
os.makedirs(os.path.dirname(file), exist_ok=True)
PIL.Image.fromarray(np.zeros((*size, 3), dtype=np.uint8)).save(file)
def make_devkit_archive(stack, root, split):
archive = DEVKITS[split]
files = []
meta = make_categories_txt(root, CATEGORIES)
mock_class_attribute(stack, mock_target("_CATEGORIES_META"), meta)
files.append(meta[0])
meta = {split: make_file_list_txt(root, FILE_LISTS[split])}
mock_class_attribute(stack, mock_target("_FILE_LIST_META"), meta)
files.extend([item[0] for item in meta.values()])
meta = {VARIANTS[split]: make_tar(root, archive, *files)}
mock_class_attribute(stack, mock_target("_DEVKIT_META"), meta)
def make_images_archive(stack, root, split, small):
archive, folder_default, folder_renamed = IMAGES[(split, small)]
image_size = (256, 256) if small else (512, random.randint(512, 1024))
files, idcs = zip(*FILE_LIST_CONTENT)
images = [file.lstrip("/").replace("/", os.sep) for file in files]
for image in images:
make_image(os.path.join(root, folder_default, image), image_size)
meta = {(split, small): make_tar(root, archive, folder_default)}
mock_class_attribute(stack, mock_target("_IMAGES_META"), meta)
return [(os.path.join(root, folder_renamed, image), idx) for image, idx in zip(images, idcs)]
with contextlib.ExitStack() as stack, get_tmp_dir() as root:
make_devkit_archive(stack, root, split)
class_to_idx = dict(CATEGORIES_CONTENT)
classes = list(class_to_idx.keys())
data = {"class_to_idx": class_to_idx, "classes": classes}
data["imgs"] = make_images_archive(stack, root, split, small)
clean_dir(root, ".tar$")
yield root, data
| true | true |
f7fc55a2b7b053b3b7296d141796a25fb46580bf | 144 | py | Python | gis/__init__.py | DiviPeople/villudyr | b9de79f3985787685a00d407488909da183a5ccf | [
"Apache-2.0"
] | null | null | null | gis/__init__.py | DiviPeople/villudyr | b9de79f3985787685a00d407488909da183a5ccf | [
"Apache-2.0"
] | null | null | null | gis/__init__.py | DiviPeople/villudyr | b9de79f3985787685a00d407488909da183a5ccf | [
"Apache-2.0"
] | null | null | null | """Module initializing the package."""
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gis.docker')
django.setup()
| 14.4 | 61 | 0.756944 |
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gis.docker')
django.setup()
| true | true |
f7fc56499362846a5ade9d6580121d8beb2af7f0 | 1,038 | py | Python | setup.py | mir-group/PyfileUtils | 925046a5e328ab144e8b554de9efc2655c686b0a | [
"MIT"
] | null | null | null | setup.py | mir-group/PyfileUtils | 925046a5e328ab144e8b554de9efc2655c686b0a | [
"MIT"
] | null | null | null | setup.py | mir-group/PyfileUtils | 925046a5e328ab144e8b554de9efc2655c686b0a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from pathlib import Path
# Distribution name is "pyfile-utils"; the importable package is "pyfile_utils".
package_names = ["pyfile", "utils"]
_name = "_".join(package_names)
name = "-".join(package_names)
# see https://packaging.python.org/guides/single-sourcing-package-version/
version_dict = {}
with open(Path(__file__).parents[0] / _name / "_version.py") as fp:
    # Execute _version.py to obtain __version__ without importing the package.
    exec(fp.read(), version_dict)
version = version_dict["__version__"]
del version_dict
setup(
    name=name,
    version=f"{version}",
    author="Lixin Sun",
    author_email="nw13mifaso@gmail.com",
    description="A collection of utils for file write/load and instantiation",
    url="https://github.com/mir-group/PyfileUtils",
    python_requires=">=3.6.9",
    packages=find_packages(include=[name, _name, _name + ".*"]),
    install_requires=[
        "numpy",
        "pyyaml",
        "contextvars",
    ],
    zip_safe=True,
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
| 28.833333 | 78 | 0.659923 | from setuptools import setup, find_packages
from pathlib import Path
package_names = ["pyfile", "utils"]
_name = "_".join(package_names)
name = "-".join(package_names)
version_dict = {}
with open(Path(__file__).parents[0] / _name / "_version.py") as fp:
exec(fp.read(), version_dict)
version = version_dict["__version__"]
del version_dict
setup(
name=name,
version=f"{version}",
author="Lixin Sun",
author_email="nw13mifaso@gmail.com",
description="A collection of utils for file write/load and instantiation",
url="https://github.com/mir-group/PyfileUtils",
python_requires=">=3.6.9",
packages=find_packages(include=[name, _name, _name + ".*"]),
install_requires=[
"numpy",
"pyyaml",
"contextvars",
],
zip_safe=True,
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| true | true |
f7fc565ac3b40a146237178344e60fd3947c3d50 | 3,693 | py | Python | pyaoscx/utils/list_attributes.py | aopdal/pyaoscx | b21adba63ace0b9e2a54f65e82b284a6d023791c | [
"Apache-2.0"
] | null | null | null | pyaoscx/utils/list_attributes.py | aopdal/pyaoscx | b21adba63ace0b9e2a54f65e82b284a6d023791c | [
"Apache-2.0"
] | null | null | null | pyaoscx/utils/list_attributes.py | aopdal/pyaoscx | b21adba63ace0b9e2a54f65e82b284a6d023791c | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
from pyaoscx.exceptions.generic_op_error import GenericOperationError
class ListDescriptor(list):
    """
    Attribute descriptor for list attributes whose elements are
    pyaoscx_module objects acting as references to remote resources.
    Assigning a new list triggers delete() on every element that was
    present before but is missing from the new value.
    """

    def __init__(self, name):
        # Key under which the managed list is stored in the owner
        # instance's __dict__.
        self.name = name

    def __get__(self, instance, owner):
        """Return the list currently stored on *instance*."""
        return instance.__dict__[self.name]

    def __set__(self, instance, new_list):
        """Store *new_list* on *instance*, deleting dropped references."""
        replacement = ReferenceList(new_list)
        previous = instance.__dict__.get(self.name)
        instance.__dict__[self.name] = replacement
        if previous is None or previous == replacement:
            return
        # Any element that disappeared from the list is deleted remotely.
        for element in previous:
            if element in replacement:
                continue
            try:
                element.delete()
            except AttributeError:
                # Plain values have no delete(); nothing to clean up.
                pass
class ReferenceList(list):
    """
    Wrapper class for a Python list object.

    Overrides remove() so that removing an element also issues the
    pyaoscx.pyaoscx_module.delete() call on it, i.e. the referenced
    remote resource is deleted when it leaves the list.
    """

    def __init__(self, value):
        """Initialize from any iterable, exactly like list(value)."""
        list.__init__(self, value)

    def __setitem__(self, key, value):
        """
        Intercept the l[key]=value operations.
        Also covers slice assignment.
        """
        # The previous try/except around a throwaway __getitem__ took the
        # same action on every path; list.__setitem__ already raises the
        # appropriate IndexError/TypeError for bad keys.
        list.__setitem__(self, key, value)

    def __delitem__(self, key):
        """
        Delete self[key] without touching the remote resource.
        """
        list.__delitem__(self, key)

    def pop(self, index=-1):
        """
        Remove and return item at index (default last).

        Accepts an explicit index, matching list.pop(); the previous
        signature only allowed popping the last element.
        """
        return list.pop(self, index)

    def extend(self, newvalue):
        """
        Extend list by appending elements from iterable.
        """
        list.extend(self, newvalue)

    def insert(self, i, element):
        """
        Insert object before index.
        """
        list.insert(self, i, element)

    def remove(self, element):
        """
        Remove first occurrence of value, then delete the referenced
        resource.  Raises ValueError when the element is not present.
        """
        list.remove(self, element)
        try:
            # Delete element with a DELETE request
            element.delete()
        except GenericOperationError as error:
            # A 404 means the remote entry is already deleted: ignore it.
            # Any other failure is re-raised to the caller.
            if error.response_code != 404:
                raise error

    def reverse(self):
        """
        Reverse *IN PLACE*.
        """
        list.reverse(self)

    def sort(self, cmpfunc=None):
        """
        Stable sort *IN PLACE*.

        *cmpfunc* is an old-style (two-argument) comparison function.
        The previous implementation passed it positionally to
        list.sort(), which raises TypeError on Python 3 (list.sort takes
        no positional arguments there) even when cmpfunc is None.
        """
        if cmpfunc is None:
            list.sort(self)
        else:
            # Local import keeps the module's dependency footprint unchanged.
            import functools
            list.sort(self, key=functools.cmp_to_key(cmpfunc))
| 27.559701 | 75 | 0.562957 |
from pyaoscx.exceptions.generic_op_error import GenericOperationError
class ListDescriptor(list):
def __init__(
self,
name,
):
self.name = name
def __get__(self, instance, owner):
return instance.__dict__[self.name]
def __set__(self, instance, new_list):
new_list = ReferenceList(new_list)
prev_list = (
instance.__dict__[self.name]
if self.name in instance.__dict__
else None
)
instance.__dict__[self.name] = new_list
if prev_list is not None and prev_list != new_list:
for element in prev_list:
if element not in new_list:
try:
element.delete()
except AttributeError:
pass
class ReferenceList(list):
def __init__(self, value):
list.__init__(self, value)
def __setitem__(self, key, value):
try:
_ = self.__getitem__(key)
except KeyError:
list.__setitem__(self, key, value)
else:
list.__setitem__(self, key, value)
def __delitem__(self, key):
_ = list.__getitem__(self, key)
list.__delitem__(self, key)
def pop(self):
oldvalue = list.pop(self)
return oldvalue
def extend(self, newvalue):
list.extend(self, newvalue)
def insert(self, i, element):
list.insert(self, i, element)
def remove(self, element):
_ = list.index(self, element)
list.remove(self, element)
try:
element.delete()
except GenericOperationError as error:
if error.response_code != 404:
raise error
def reverse(self):
list.reverse(self)
def sort(self, cmpfunc=None):
_ = self[:]
list.sort(self, cmpfunc)
| true | true |
f7fc577bc363a918f7b55dcea299ff98c58e511c | 395 | py | Python | Random Python Projects/Hypixel Skyblock/BAZAARE.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | Random Python Projects/Hypixel Skyblock/BAZAARE.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | Random Python Projects/Hypixel Skyblock/BAZAARE.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | import json, requests, datetime
item = 'HOT_POTATO_BOOK'
r = requests.get(f"https://api.hypixel.net/skyblock/bazaar/product?key=7e8355c8-a50b-4473-ba41-b03d0473a0d8&productId={item}").json()
for i in r['product_info']['week_historic']:
time = datetime.datetime.fromtimestamp(i['timestamp']/1000).strftime("%a, %H:%M")
print(f"{time} ---> Sells: {i['sells']:,} >>> Buys: {i['buys']:,}") | 49.375 | 133 | 0.681013 | import json, requests, datetime
item = 'HOT_POTATO_BOOK'
r = requests.get(f"https://api.hypixel.net/skyblock/bazaar/product?key=7e8355c8-a50b-4473-ba41-b03d0473a0d8&productId={item}").json()
for i in r['product_info']['week_historic']:
time = datetime.datetime.fromtimestamp(i['timestamp']/1000).strftime("%a, %H:%M")
print(f"{time} ---> Sells: {i['sells']:,} >>> Buys: {i['buys']:,}") | true | true |
f7fc57bb32b25b6b9c1161a740b9ac75acd0d10e | 6,967 | py | Python | utils/SwiftBuildSupport.py | YogeshBharate/Swift | a14a836caa42b1652f8f30b725370eff2ad6d799 | [
"Apache-2.0"
] | 3 | 2016-10-13T11:30:36.000Z | 2016-12-10T05:00:31.000Z | utils/SwiftBuildSupport.py | YogeshBharate/Swift | a14a836caa42b1652f8f30b725370eff2ad6d799 | [
"Apache-2.0"
] | null | null | null | utils/SwiftBuildSupport.py | YogeshBharate/Swift | a14a836caa42b1652f8f30b725370eff2ad6d799 | [
"Apache-2.0"
] | 1 | 2019-02-10T19:49:36.000Z | 2019-02-10T19:49:36.000Z | # utils/SwiftBuildSupport.py - Utilities for Swift build scripts -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
try:
# Python 2
import ConfigParser
except ImportError:
# Python 3
import configparser as ConfigParser
import os
import pipes
import platform
import subprocess
import sys
HOME = os.environ.get("HOME", "/")
def _get_default_source_root():
    """Infer the Swift source root from this file's location on disk.

    Handles two layouts:
      $SWIFT_SOURCE_ROOT/swift/utils/SwiftBuildSupport.py
      $SWIFT_SOURCE_ROOT/llvm/tools/swift/utils/SwiftBuildSupport.py
    Returns "" when neither layout matches.
    """
    # Are we in a Swift checkout?  This file should live in swift/utils/.
    (swift_path, utils_dirname) = os.path.split(os.path.dirname(__file__))
    if utils_dirname != "utils":
        return ""
    if not os.path.exists(os.path.join(swift_path, 'CMakeLists.txt')):
        return ""
    swift_root = os.path.dirname(swift_path)
    # Does the Swift checkout sit inside an LLVM tree (llvm/tools/swift)?
    # If so, the source root is one level higher still.
    (llvm_path, tools_dirname) = os.path.split(swift_root)
    if tools_dirname != "tools":
        return swift_root
    if not os.path.exists(os.path.join(llvm_path, 'CMakeLists.txt')):
        return swift_root
    return os.path.dirname(llvm_path)
# Set SWIFT_SOURCE_ROOT in your environment to control where the sources
# are found.
SWIFT_SOURCE_ROOT = os.environ.get(
"SWIFT_SOURCE_ROOT", _get_default_source_root())
# Set SWIFT_BUILD_ROOT to a directory that will contain a subdirectory
# for each build configuration
SWIFT_BUILD_ROOT = os.environ.get(
"SWIFT_BUILD_ROOT", os.path.join(SWIFT_SOURCE_ROOT, "build"))
def print_with_argv0(message):
    """Print *message* prefixed by this program's name (sys.argv[0])."""
    prefix = sys.argv[0] + ": "
    print(prefix + message)
def quote_shell_command(args):
    """Return *args* joined into a single, shell-safe command string.

    Each argument is quoted so the result can be pasted into a POSIX
    shell verbatim.  Uses shlex.quote -- pipes.quote is deprecated since
    Python 3.11 and removed in 3.13 -- with a pipes.quote fallback that
    keeps this module's Python 2 compatibility.
    """
    try:
        from shlex import quote
    except ImportError:
        # Python 2: shlex.quote does not exist; pipes.quote is equivalent.
        from pipes import quote
    return " ".join(quote(a) for a in args)
def check_call(args, print_command=False, verbose=False, disable_sleep=False):
    """Run *args* via subprocess.check_call, exiting the process on failure.

    When *disable_sleep* is set on macOS, the command is prefixed with
    'caffeinate'.  Returns subprocess.check_call's result on success; on
    CalledProcessError or OSError it prints a diagnostic and calls
    sys.exit(1) instead of raising.
    """
    if disable_sleep and platform.system() == 'Darwin':
        # Build a fresh list so the caller's copy of args is not mutated.
        args = ["caffeinate"] + list(args)
    if print_command:
        print(os.getcwd() + "$ " + quote_shell_command(args))
    try:
        return subprocess.check_call(args)
    except subprocess.CalledProcessError as e:
        message = ("command terminated with a non-zero exit status " +
                   str(e.returncode) + ", aborting")
    except OSError as e:
        message = ("could not execute '" + quote_shell_command(args) +
                   "': " + e.strerror)
    # Shared failure path: report and abort.
    print_with_argv0(message)
    sys.stdout.flush()
    sys.exit(1)
def check_output(args, print_command=False, verbose=False):
    """Run *args* and return its captured standard output.

    On CalledProcessError or OSError it prints a diagnostic and calls
    sys.exit(1) instead of raising, mirroring check_call above.
    """
    if print_command:
        print(os.getcwd() + "$ " + quote_shell_command(args))
    try:
        return subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        message = ("command terminated with a non-zero exit status " +
                   str(e.returncode) + ", aborting")
    except OSError as e:
        message = ("could not execute '" + quote_shell_command(args) +
                   "': " + e.strerror)
    # Shared failure path: report and abort.
    print_with_argv0(message)
    sys.stdout.flush()
    sys.exit(1)
def _load_preset_files_impl(preset_file_names, substitutions={}):
config = ConfigParser.SafeConfigParser(substitutions, allow_no_value=True)
if config.read(preset_file_names) == []:
print_with_argv0(
"preset file not found (tried " + str(preset_file_names) + ")")
sys.exit(1)
return config
_PRESET_PREFIX = "preset: "
def _get_preset_options_impl(config, substitutions, preset_name):
    """Collect the options of one preset section, recursing into mixins.

    Returns (build_script_opts, build_script_impl_opts, missing_opts),
    or (None, None, None) when the preset section does not exist.
    Options appearing after a "dash-dash" entry are routed to the
    build-script-impl list; options whose interpolation references a
    missing substitution are reported in missing_opts.
    """
    section_name = _PRESET_PREFIX + preset_name
    if section_name not in config.sections():
        return (None, None, None)
    build_script_opts = []
    build_script_impl_opts = []
    missing_opts = []
    dash_dash_seen = False
    for o in config.options(section_name):
        try:
            a = config.get(section_name, o)
        except ConfigParser.InterpolationMissingOptionError as e:
            # e.reference contains the correctly formatted option
            missing_opts.append(e.reference)
            continue
        # Options declared without a value (allow_no_value) come back as
        # None; normalize to the empty string.
        if not a:
            a = ""
        # Substitution keys are caller-provided values, not options.
        if o in substitutions:
            continue
        opt = None
        if o == "mixin-preset":
            # Split on newlines and filter out empty lines.
            mixins = filter(None, [m.strip() for m in a.splitlines()])
            for mixin in mixins:
                (base_build_script_opts,
                 base_build_script_impl_opts,
                 base_missing_opts) = \
                    _get_preset_options_impl(config, substitutions, mixin)
                build_script_opts += base_build_script_opts
                build_script_impl_opts += base_build_script_impl_opts
                missing_opts += base_missing_opts
        elif o == "dash-dash":
            dash_dash_seen = True
        elif a == "":
            opt = "--" + o
        else:
            opt = "--" + o + "=" + a
        if opt:
            if not dash_dash_seen:
                build_script_opts.append(opt)
            else:
                build_script_impl_opts.append(opt)
    return (build_script_opts, build_script_impl_opts, missing_opts)
def get_preset_options(substitutions, preset_file_names, preset_name):
    """Return the full option list for *preset_name*.

    The result is the build-script options, a literal "--" separator,
    then the build-script-impl options.  Exits with a diagnostic when
    the preset is unknown or required substitutions are missing.
    """
    config = _load_preset_files_impl(preset_file_names, substitutions)
    (build_script_opts, build_script_impl_opts, missing_opts) = \
        _get_preset_options_impl(config, substitutions, preset_name)
    if not build_script_opts and not build_script_impl_opts:
        print_with_argv0("preset '" + preset_name + "' not found")
        sys.exit(1)
    if missing_opts:
        print_with_argv0("missing option(s) for preset '" + preset_name +
                         "': " + ", ".join(missing_opts))
        sys.exit(1)
    return build_script_opts + ["--"] + build_script_impl_opts
def get_all_preset_names(preset_file_names):
    """Return the names of every preset defined across the given files."""
    config = _load_preset_files_impl(preset_file_names)
    prefix_length = len(_PRESET_PREFIX)
    names = []
    for section in config.sections():
        if section.startswith(_PRESET_PREFIX):
            names.append(section[prefix_length:])
    return names
# A context manager for changing the current working directory.
#
# with WorkingDirectory('/tmp'):
# ... do work in /tmp...
class WorkingDirectory(object):
    """Context manager that temporarily changes the working directory.

    with WorkingDirectory('/tmp'):
        ... do work in /tmp...
    """
    def __init__(self, new_cwd):
        # Directory to switch into; the chdir happens in __enter__.
        self.new_cwd = new_cwd
    def __enter__(self):
        # Remember the current directory so __exit__ can restore it.
        self.old_cwd = os.getcwd()
        os.chdir(self.new_cwd)
    def __exit__(self, type, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.old_cwd)
| 31.524887 | 79 | 0.648486 |
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import os
import pipes
import platform
import subprocess
import sys
HOME = os.environ.get("HOME", "/")
def _get_default_source_root():
result = ""
(swift_path, parent_dirname) = os.path.split(os.path.dirname(__file__))
if parent_dirname != "utils":
return result
if not os.path.exists(os.path.join(swift_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(swift_path)
(llvm_path, parent_dirname) = os.path.split(result)
if parent_dirname != "tools":
return result
if not os.path.exists(os.path.join(llvm_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(llvm_path)
return result
SWIFT_SOURCE_ROOT = os.environ.get(
"SWIFT_SOURCE_ROOT", _get_default_source_root())
SWIFT_BUILD_ROOT = os.environ.get(
"SWIFT_BUILD_ROOT", os.path.join(SWIFT_SOURCE_ROOT, "build"))
def print_with_argv0(message):
print(sys.argv[0] + ": " + message)
def quote_shell_command(args):
return " ".join([pipes.quote(a) for a in args])
def check_call(args, print_command=False, verbose=False, disable_sleep=False):
if disable_sleep:
if platform.system() == 'Darwin':
args = list(args)
args.insert(0, "caffeinate")
if print_command:
print(os.getcwd() + "$ " + quote_shell_command(args))
try:
return subprocess.check_call(args)
except subprocess.CalledProcessError as e:
print_with_argv0(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
sys.stdout.flush()
sys.exit(1)
except OSError as e:
print_with_argv0(
"could not execute '" + quote_shell_command(args) +
"': " + e.strerror)
sys.stdout.flush()
sys.exit(1)
def check_output(args, print_command=False, verbose=False):
if print_command:
print(os.getcwd() + "$ " + quote_shell_command(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print_with_argv0(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
sys.stdout.flush()
sys.exit(1)
except OSError as e:
print_with_argv0(
"could not execute '" + quote_shell_command(args) +
"': " + e.strerror)
sys.stdout.flush()
sys.exit(1)
def _load_preset_files_impl(preset_file_names, substitutions={}):
config = ConfigParser.SafeConfigParser(substitutions, allow_no_value=True)
if config.read(preset_file_names) == []:
print_with_argv0(
"preset file not found (tried " + str(preset_file_names) + ")")
sys.exit(1)
return config
_PRESET_PREFIX = "preset: "
def _get_preset_options_impl(config, substitutions, preset_name):
section_name = _PRESET_PREFIX + preset_name
if section_name not in config.sections():
return (None, None, None)
build_script_opts = []
build_script_impl_opts = []
missing_opts = []
dash_dash_seen = False
for o in config.options(section_name):
try:
a = config.get(section_name, o)
except ConfigParser.InterpolationMissingOptionError as e:
missing_opts.append(e.reference)
continue
if not a:
a = ""
if o in substitutions:
continue
opt = None
if o == "mixin-preset":
mixins = filter(None, [m.strip() for m in a.splitlines()])
for mixin in mixins:
(base_build_script_opts,
base_build_script_impl_opts,
base_missing_opts) = \
_get_preset_options_impl(config, substitutions, mixin)
build_script_opts += base_build_script_opts
build_script_impl_opts += base_build_script_impl_opts
missing_opts += base_missing_opts
elif o == "dash-dash":
dash_dash_seen = True
elif a == "":
opt = "--" + o
else:
opt = "--" + o + "=" + a
if opt:
if not dash_dash_seen:
build_script_opts.append(opt)
else:
build_script_impl_opts.append(opt)
return (build_script_opts, build_script_impl_opts, missing_opts)
def get_preset_options(substitutions, preset_file_names, preset_name):
config = _load_preset_files_impl(preset_file_names, substitutions)
(build_script_opts, build_script_impl_opts, missing_opts) = \
_get_preset_options_impl(config, substitutions, preset_name)
if not build_script_opts and not build_script_impl_opts:
print_with_argv0("preset '" + preset_name + "' not found")
sys.exit(1)
if missing_opts:
print_with_argv0("missing option(s) for preset '" + preset_name +
"': " + ", ".join(missing_opts))
sys.exit(1)
return build_script_opts + ["--"] + build_script_impl_opts
def get_all_preset_names(preset_file_names):
config = _load_preset_files_impl(preset_file_names)
return [name[len(_PRESET_PREFIX):] for name in config.sections()
if name.startswith(_PRESET_PREFIX)]
class WorkingDirectory(object):
    """Context manager that temporarily changes the working directory.

    The previous working directory is restored on exit, even when the
    managed block raises.
    """

    def __init__(self, new_cwd):
        self.new_cwd = new_cwd

    def __enter__(self):
        # Remember where we came from so __exit__ can restore it.
        self.old_cwd = os.getcwd()
        os.chdir(self.new_cwd)

    def __exit__(self, type, value, traceback):
        os.chdir(self.old_cwd)
| true | true |
f7fc57d0e520c850d41edf11f01047f9f128d125 | 63 | py | Python | archives/workflows/simple_uq/swift/junk.py | mdorier/Supervisor | f1e43b2b33fb2cf9e03ea3ac49378aba37bd9839 | [
"MIT"
] | 10 | 2017-03-14T14:36:19.000Z | 2021-01-21T00:39:36.000Z | archives/workflows/simple_uq/swift/junk.py | mdorier/Supervisor | f1e43b2b33fb2cf9e03ea3ac49378aba37bd9839 | [
"MIT"
] | 58 | 2017-03-03T21:07:53.000Z | 2021-07-19T18:51:03.000Z | archives/workflows/simple_uq/swift/junk.py | ORNL-BSEC/Supervisor | 14a73ad19b10cebab0d7d2d48e52692485957ad2 | [
"MIT"
] | 21 | 2017-03-08T16:07:47.000Z | 2020-11-24T04:23:00.000Z |
inputs = eval(permutation_sets)
training, validation = inputs
| 15.75 | 31 | 0.793651 |
inputs = eval(permutation_sets)
training, validation = inputs
| true | true |
f7fc57e78b69253de269a68cdc3bf266b165bc16 | 38,765 | py | Python | libs/utils/env.py | MIPS/external-lisa | 48024e3bdcb39528f69bb897a3aff57347535c7d | [
"Apache-2.0"
] | null | null | null | libs/utils/env.py | MIPS/external-lisa | 48024e3bdcb39528f69bb897a3aff57347535c7d | [
"Apache-2.0"
] | null | null | null | libs/utils/env.py | MIPS/external-lisa | 48024e3bdcb39528f69bb897a3aff57347535c7d | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import logging
import os
import re
import shutil
import sys
import time
import unittest
import devlib
from devlib.utils.misc import memoized
from devlib import Platform, TargetError
from trappy.stats.Topology import Topology
from wlgen import RTA
from energy import EnergyMeter
from energy_model import EnergyModel
from conf import JsonConf
from platforms.juno_energy import juno_energy
from platforms.hikey_energy import hikey_energy
from platforms.pixel_energy import pixel_energy
# Default SSH credentials used when the target config provides none.
USERNAME_DEFAULT = 'root'
PASSWORD_DEFAULT = ''
# Default working directory created on the target device.
WORKING_DIR_DEFAULT = '/data/local/schedtest'
# Default ftrace setup: all scheduler events, 10240 buffer size
# (presumably KiB, per devlib's buffer_size convention — TODO confirm).
FTRACE_EVENTS_DEFAULT = ['sched:*']
FTRACE_BUFSIZE_DEFAULT = 10240
# Results live under <LISA root>/results; a convenience symlink always
# points at the most recent run.
OUT_PREFIX = 'results'
LATEST_LINK = 'results_latest'
# Root of the LISA tree: this file lives in <root>/libs/utils.
basepath = os.path.dirname(os.path.realpath(__file__))
basepath = basepath.replace('/libs/utils', '')
def os_which(file):
    """Return the first full path of `file` found on $PATH, or None."""
    candidates = (os.path.join(directory, file)
                  for directory in os.environ["PATH"].split(os.pathsep))
    return next((candidate for candidate in candidates
                 if os.path.exists(candidate)), None)
class ShareState(object):
    """Borg-style base class: every instance shares one attribute dict."""
    _shared_state = {}

    def __init__(self):
        # Point this instance's __dict__ at the class-wide dict so any
        # attribute written on one instance is visible on all others.
        self.__dict__ = self._shared_state
class TestEnv(ShareState):
"""
Represents the environment configuring LISA, the target, and the test setup
The test environment is defined by:
- a target configuration (target_conf) defining which HW platform we
want to use to run the experiments
- a test configuration (test_conf) defining which SW setups we need on
that HW target
- a folder to collect the experiments results, which can be specified
using the test_conf::results_dir option and is by default wiped from
all the previous contents (if wipe=True)
:param target_conf:
Configuration defining the target to run experiments on. May be
- A dict defining the values directly
- A path to a JSON file containing the configuration
- ``None``, in which case $LISA_HOME/target.config is used.
You need to provide the information needed to connect to the
target. For SSH targets that means "host", "username" and
either "password" or "keyfile". All other fields are optional if
the relevant features aren't needed. Has the following keys:
**host**
Target IP or MAC address for SSH access
**username**
For SSH access
**keyfile**
Path to SSH key (alternative to password)
**password**
SSH password (alternative to keyfile)
**device**
Target Android device ID if using ADB
**port**
Port for Android connection default port is 5555
**ANDROID_HOME**
Path to Android SDK. Defaults to ``$ANDROID_HOME`` from the
environment.
**rtapp-calib**
Calibration values for RT-App. If unspecified, LISA will
calibrate RT-App on the target. A message will be logged with
a value that can be copied here to avoid having to re-run
calibration on subsequent tests.
**tftp**
Directory path containing kernels and DTB images for the
target. LISA does *not* manage this TFTP server, it must be
provided externally. Optional.
:param test_conf: Configuration of software for target experiments. Takes
the same form as target_conf. Fields are:
**modules**
Devlib modules to be enabled. Default is []
**exclude_modules**
Devlib modules to be disabled. Default is [].
**tools**
List of tools (available under ./tools/$ARCH/) to install on
the target. Names, not paths (e.g. ['ftrace']). Default is [].
**ping_time**, **reboot_time**
Override parameters to :meth:`reboot` method
**__features__**
List of test environment features to enable. Options are:
"no-kernel"
do not deploy kernel/dtb images
"no-reboot"
do not force reboot the target at each configuration change
"debug"
enable debugging messages
**ftrace**
Configuration for ftrace. Dictionary with keys:
events
events to enable.
functions
functions to enable in the function tracer. Optional.
buffsize
Size of buffer. Default is 10240.
**systrace**
Configuration for systrace. Dictionary with keys:
categories:
overide the list of categories enabled
extra_categories:
append to the default list of categories
extra_events:
additional ftrace events to manually enable during systrac'ing
buffsize:
Size of ftrace buffer that systrace uses
**results_dir**
location of results of the experiments
:param wipe: set true to cleanup all previous content from the output
folder
:type wipe: bool
:param force_new: Create a new TestEnv object even if there is one available
for this session. By default, TestEnv only creates one
object per session, use this to override this behaviour.
:type force_new: bool
"""
_initialized = False
def __init__(self, target_conf=None, test_conf=None, wipe=True,
force_new=False):
super(TestEnv, self).__init__()
if self._initialized and not force_new:
return
self.conf = {}
self.test_conf = {}
self.target = None
self.ftrace = None
self.workdir = WORKING_DIR_DEFAULT
self.__installed_tools = set()
self.__modules = []
self.__connection_settings = None
self._calib = None
# Keep track of target IP and MAC address
self.ip = None
self.mac = None
# Keep track of last installed kernel
self.kernel = None
self.dtb = None
# Energy meter configuration
self.emeter = None
# The platform descriptor to be saved into the results folder
self.platform = {}
# Keep track of android support
self.LISA_HOME = os.environ.get('LISA_HOME', '/vagrant')
self.ANDROID_HOME = os.environ.get('ANDROID_HOME', None)
self.CATAPULT_HOME = os.environ.get('CATAPULT_HOME',
os.path.join(self.LISA_HOME, 'tools', 'catapult'))
# Setup logging
self._log = logging.getLogger('TestEnv')
# Compute base installation path
self._log.info('Using base path: %s', basepath)
# Setup target configuration
if isinstance(target_conf, dict):
self._log.info('Loading custom (inline) target configuration')
self.conf = target_conf
elif isinstance(target_conf, str):
self._log.info('Loading custom (file) target configuration')
self.conf = self.loadTargetConfig(target_conf)
elif target_conf is None:
self._log.info('Loading default (file) target configuration')
self.conf = self.loadTargetConfig()
self._log.debug('Target configuration %s', self.conf)
# Setup test configuration
if test_conf:
if isinstance(test_conf, dict):
self._log.info('Loading custom (inline) test configuration')
self.test_conf = test_conf
elif isinstance(test_conf, str):
self._log.info('Loading custom (file) test configuration')
self.test_conf = self.loadTargetConfig(test_conf)
else:
raise ValueError('test_conf must be either a dictionary or a filepath')
self._log.debug('Test configuration %s', self.conf)
# Setup target working directory
if 'workdir' in self.conf:
self.workdir = self.conf['workdir']
# Initialize binary tools to deploy
test_conf_tools = self.test_conf.get('tools', [])
target_conf_tools = self.conf.get('tools', [])
self.__tools = list(set(test_conf_tools + target_conf_tools))
# Initialize ftrace events
# test configuration override target one
if 'ftrace' in self.test_conf:
self.conf['ftrace'] = self.test_conf['ftrace']
if self.conf.get('ftrace'):
self.__tools.append('trace-cmd')
# Initialize features
if '__features__' not in self.conf:
self.conf['__features__'] = []
self._init()
# Initialize FTrace events collection
self._init_ftrace(True)
# Initialize RT-App calibration values
self.calibration()
# Initialize local results folder
# test configuration overrides target one
self.res_dir = (self.test_conf.get('results_dir') or
self.conf.get('results_dir'))
if self.res_dir and not os.path.isabs(self.res_dir):
self.res_dir = os.path.join(basepath, 'results', self.res_dir)
else:
self.res_dir = os.path.join(basepath, OUT_PREFIX)
self.res_dir = datetime.datetime.now()\
.strftime(self.res_dir + '/%Y%m%d_%H%M%S')
if wipe and os.path.exists(self.res_dir):
self._log.warning('Wipe previous contents of the results folder:')
self._log.warning(' %s', self.res_dir)
shutil.rmtree(self.res_dir, ignore_errors=True)
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
res_lnk = os.path.join(basepath, LATEST_LINK)
if os.path.islink(res_lnk):
os.remove(res_lnk)
os.symlink(self.res_dir, res_lnk)
# Initialize energy probe instrument
self._init_energy(True)
self._log.info('Set results folder to:')
self._log.info(' %s', self.res_dir)
self._log.info('Experiment results available also in:')
self._log.info(' %s', res_lnk)
self._initialized = True
def loadTargetConfig(self, filepath='target.config'):
"""
Load the target configuration from the specified file.
:param filepath: Path of the target configuration file. Relative to the
root folder of the test suite.
:type filepath: str
"""
# Loading default target configuration
conf_file = os.path.join(basepath, filepath)
self._log.info('Loading target configuration [%s]...', conf_file)
conf = JsonConf(conf_file)
conf.load()
return conf.json
    def _init(self, force = False):
        """Connect to the target (if needed), then rebuild the topology and
        platform descriptors from the freshly-probed target.

        :param force: re-initialize even if a target connection exists.
        """
        # Initialize target
        self._init_target(force)
        # Initialize target Topology for behavior analysis
        CLUSTERS = []
        # Build topology for a big.LITTLE systems
        if self.target.big_core and \
           (self.target.abi == 'arm64' or self.target.abi == 'armeabi'):
            # Populate cluster for a big.LITTLE platform
            if self.target.big_core:
                # Load cluster of LITTLE cores
                CLUSTERS.append(
                    [i for i,t in enumerate(self.target.core_names)
                                if t == self.target.little_core])
                # Load cluster of big cores
                CLUSTERS.append(
                    [i for i,t in enumerate(self.target.core_names)
                                if t == self.target.big_core])
        # Build topology for an SMP systems
        elif not self.target.big_core or \
             self.target.abi == 'x86_64':
            # One cluster entry per distinct cluster id reported by devlib
            for c in set(self.target.core_clusters):
                CLUSTERS.append(
                    [i for i,v in enumerate(self.target.core_clusters)
                                    if v == c])
        self.topology = Topology(clusters=CLUSTERS)
        self._log.info('Topology:')
        self._log.info('   %s', CLUSTERS)
        # Initialize the platform descriptor
        self._init_platform()
    def _init_target(self, force = False):
        """Connect to the target described by the target config and cache the
        devlib Target object on self.target.

        Resolves connection credentials, the per-board platform descriptor
        and the devlib modules to load, then instantiates a Linux, Android
        or local-host devlib target accordingly, deploys the required tools
        and verifies that all requested modules initialized.

        :param force: reconnect even if a target connection already exists.
        :raises ValueError: on missing/unsupported mandatory config keys
            ('host', 'platform').
        :raises RuntimeError: if the adb binary is missing or a devlib
            module fails to initialize.
        """
        if not force and self.target is not None:
            return self.target
        self.__connection_settings = {}
        # Configure username
        if 'username' in self.conf:
            self.__connection_settings['username'] = self.conf['username']
        else:
            self.__connection_settings['username'] = USERNAME_DEFAULT
        # Configure password or SSH keyfile
        if 'keyfile' in self.conf:
            self.__connection_settings['keyfile'] = self.conf['keyfile']
        elif 'password' in self.conf:
            self.__connection_settings['password'] = self.conf['password']
        else:
            self.__connection_settings['password'] = PASSWORD_DEFAULT
        # Configure port
        if 'port' in self.conf:
            self.__connection_settings['port'] = self.conf['port']
        # Configure the host IP/MAC address
        # A 'host' containing ':' is treated as a MAC address and resolved
        # to an IP through the ARP cache.
        if 'host' in self.conf:
            try:
                if ':' in self.conf['host']:
                    (self.mac, self.ip) = self.resolv_host(self.conf['host'])
                else:
                    self.ip = self.conf['host']
                self.__connection_settings['host'] = self.ip
            except KeyError:
                raise ValueError('Config error: missing [host] parameter')
        try:
            platform_type = self.conf['platform']
        except KeyError:
            raise ValueError('Config error: missing [platform] parameter')
        if platform_type.lower() == 'android':
            # Locate adb/fastboot either inside ANDROID_HOME or on $PATH
            self.ANDROID_HOME = self.conf.get('ANDROID_HOME',
                                              self.ANDROID_HOME)
            if self.ANDROID_HOME:
                self._adb = os.path.join(self.ANDROID_HOME,
                                         'platform-tools', 'adb')
                self._fastboot = os.path.join(self.ANDROID_HOME,
                                              'platform-tools', 'fastboot')
                os.environ['ANDROID_HOME'] = self.ANDROID_HOME
                os.environ['CATAPULT_HOME'] = self.CATAPULT_HOME
            else:
                self._log.info('Android SDK not found as ANDROID_HOME not defined, using PATH for platform tools')
                self._adb = os_which('adb')
                self._fastboot = os_which('fastboot')
                if self._adb:
                    self._log.info('Using adb from ' + self._adb)
                if self._fastboot:
                    self._log.info('Using fastboot from ' + self._fastboot)
            self._log.info('External tools using:')
            self._log.info('   ANDROID_HOME: %s', self.ANDROID_HOME)
            self._log.info('   CATAPULT_HOME: %s', self.CATAPULT_HOME)
            if not os.path.exists(self._adb):
                raise RuntimeError('\nADB binary not found\n\t{}\ndoes not exists!\n\n'
                                   'Please configure ANDROID_HOME to point to '
                                   'a valid Android SDK installation folder.'\
                                   .format(self._adb))
        ########################################################################
        # Board configuration
        ########################################################################
        # Setup board default if not specified by configuration
        self.nrg_model = None
        platform = None
        self.__modules = []
        if 'board' not in self.conf:
            self.conf['board'] = 'UNKNOWN'
        # Initialize TC2 board
        if self.conf['board'].upper() == 'TC2':
            platform = devlib.platform.arm.TC2()
            self.__modules = ['bl', 'hwmon', 'cpufreq']
        # Initialize JUNO board
        elif self.conf['board'].upper() in ('JUNO', 'JUNO2'):
            platform = devlib.platform.arm.Juno()
            self.nrg_model = juno_energy
            self.__modules = ['bl', 'hwmon', 'cpufreq']
        # Initialize OAK board
        elif self.conf['board'].upper() == 'OAK':
            platform = Platform(model='MT8173')
            self.__modules = ['bl', 'cpufreq']
        # Initialized HiKey board
        elif self.conf['board'].upper() == 'HIKEY':
            self.nrg_model = hikey_energy
            self.__modules = [ "cpufreq", "cpuidle" ]
            platform = Platform(model='hikey')
        # Initialize Pixel phone
        elif self.conf['board'].upper() == 'PIXEL':
            self.nrg_model = pixel_energy
            self.__modules = ['bl', 'cpufreq']
            platform = Platform(model='pixel')
        elif self.conf['board'] != 'UNKNOWN':
            # Initilize from platform descriptor (if available)
            board = self._load_board(self.conf['board'])
            if board:
                core_names=board['cores']
                platform = Platform(
                    model=self.conf['board'],
                    core_names=core_names,
                    core_clusters = self._get_clusters(core_names),
                    big_core=board.get('big_core', None)
                )
                self.__modules=board.get('modules', [])
        ########################################################################
        # Modules configuration
        ########################################################################
        modules = set(self.__modules)
        # Refine modules list based on target.conf
        modules.update(self.conf.get('modules', []))
        # Merge tests specific modules
        modules.update(self.test_conf.get('modules', []))
        # Exclusions from either config win over any inclusion above
        remove_modules = set(self.conf.get('exclude_modules', []) +
                             self.test_conf.get('exclude_modules', []))
        modules.difference_update(remove_modules)
        self.__modules = list(modules)
        self._log.info('Devlib modules to load: %s', self.__modules)
        ########################################################################
        # Devlib target setup (based on target.config::platform)
        ########################################################################
        # If the target is Android, we need just (eventually) the device
        if platform_type.lower() == 'android':
            self.__connection_settings = None
            device = 'DEFAULT'
            if 'device' in self.conf:
                device = self.conf['device']
                self.__connection_settings = {'device' : device}
            elif 'host' in self.conf:
                host = self.conf['host']
                port = '5555'
                if 'port' in self.conf:
                    port = str(self.conf['port'])
                device = '{}:{}'.format(host, port)
                self.__connection_settings = {'device' : device}
            self._log.info('Connecting Android target [%s]', device)
        else:
            self._log.info('Connecting %s target:', platform_type)
            for key in self.__connection_settings:
                self._log.info('%10s : %s', key,
                               self.__connection_settings[key])
        self._log.info('Connection settings:')
        self._log.info('   %s', self.__connection_settings)
        if platform_type.lower() == 'linux':
            self._log.debug('Setup LINUX target...')
            if "host" not in self.__connection_settings:
                raise ValueError('Missing "host" param in Linux target conf')
            self.target = devlib.LinuxTarget(
                    platform = platform,
                    connection_settings = self.__connection_settings,
                    load_default_modules = False,
                    modules = self.__modules)
        elif platform_type.lower() == 'android':
            self._log.debug('Setup ANDROID target...')
            self.target = devlib.AndroidTarget(
                    platform = platform,
                    connection_settings = self.__connection_settings,
                    load_default_modules = False,
                    modules = self.__modules)
        elif platform_type.lower() == 'host':
            self._log.debug('Setup HOST target...')
            self.target = devlib.LocalLinuxTarget(
                    platform = platform,
                    load_default_modules = False,
                    modules = self.__modules)
        else:
            raise ValueError('Config error: not supported [platform] type {}'\
                    .format(platform_type))
        self._log.debug('Checking target connection...')
        self._log.debug('Target info:')
        self._log.debug('  ABI: %s', self.target.abi)
        self._log.debug(' CPUs: %s', self.target.cpuinfo)
        self._log.debug(' Clusters: %s', self.target.core_clusters)
        self._log.info('Initializing target workdir:')
        self._log.info('   %s', self.target.working_directory)
        self.target.setup()
        self.install_tools(self.__tools)
        # Verify that all the required modules have been initialized
        for module in self.__modules:
            self._log.debug('Check for module [%s]...', module)
            if not hasattr(self.target, module):
                self._log.warning('Unable to initialize [%s] module', module)
                self._log.error('Fix your target kernel configuration or '
                                'disable module from configuration')
                raise RuntimeError('Failed to initialized [{}] module, '
                        'update your kernel or test configurations'.format(module))
        if not self.nrg_model:
            # No board-provided energy model: try probing one from the
            # target itself
            try:
                self._log.info('Attempting to read energy model from target')
                self.nrg_model = EnergyModel.from_target(self.target)
            except (TargetError, RuntimeError, ValueError) as e:
                self._log.error("Couldn't read target energy model: %s", e)
def install_tools(self, tools):
"""
Install tools additional to those specified in the test config 'tools'
field
:param tools: The list of names of tools to install
:type tools: list(str)
"""
tools = set(tools)
# Add tools dependencies
if 'rt-app' in tools:
tools.update(['taskset', 'trace-cmd', 'perf', 'cgroup_run_into.sh'])
# Remove duplicates and already-instaled tools
tools.difference_update(self.__installed_tools)
tools_to_install = []
for tool in tools:
binary = '{}/tools/scripts/{}'.format(basepath, tool)
if not os.path.isfile(binary):
binary = '{}/tools/{}/{}'\
.format(basepath, self.target.abi, tool)
tools_to_install.append(binary)
for tool_to_install in tools_to_install:
self.target.install(tool_to_install)
self.__installed_tools.update(tools)
    def ftrace_conf(self, conf):
        """Force re-initialization of FTrace collection using `conf`,
        overriding any configuration-provided setup (see _init_ftrace)."""
        self._init_ftrace(True, conf)
def _init_ftrace(self, force=False, conf=None):
if not force and self.ftrace is not None:
return self.ftrace
if conf is None and 'ftrace' not in self.conf:
return None
if conf is not None:
ftrace = conf
else:
ftrace = self.conf['ftrace']
events = FTRACE_EVENTS_DEFAULT
if 'events' in ftrace:
events = ftrace['events']
functions = None
if 'functions' in ftrace:
functions = ftrace['functions']
buffsize = FTRACE_BUFSIZE_DEFAULT
if 'buffsize' in ftrace:
buffsize = ftrace['buffsize']
self.ftrace = devlib.FtraceCollector(
self.target,
events = events,
functions = functions,
buffer_size = buffsize,
autoreport = False,
autoview = False
)
if events:
self._log.info('Enabled tracepoints:')
for event in events:
self._log.info(' %s', event)
if functions:
self._log.info('Kernel functions profiled:')
for function in functions:
self._log.info(' %s', function)
return self.ftrace
    def _init_energy(self, force):
        """(Re)create the energy meter matching the board/target config,
        storing its samples under the results directory."""
        # Initialize energy probe to board default
        self.emeter = EnergyMeter.getInstance(self.target, self.conf, force,
                                              self.res_dir)
def _init_platform_bl(self):
self.platform = {
'clusters' : {
'little' : self.target.bl.littles,
'big' : self.target.bl.bigs
},
'freqs' : {
'little' : self.target.bl.list_littles_frequencies(),
'big' : self.target.bl.list_bigs_frequencies()
}
}
self.platform['cpus_count'] = \
len(self.platform['clusters']['little']) + \
len(self.platform['clusters']['big'])
def _init_platform_smp(self):
self.platform = {
'clusters' : {},
'freqs' : {}
}
for cpu_id,node_id in enumerate(self.target.core_clusters):
if node_id not in self.platform['clusters']:
self.platform['clusters'][node_id] = []
self.platform['clusters'][node_id].append(cpu_id)
if 'cpufreq' in self.target.modules:
# Try loading frequencies using the cpufreq module
for cluster_id in self.platform['clusters']:
core_id = self.platform['clusters'][cluster_id][0]
self.platform['freqs'][cluster_id] = \
self.target.cpufreq.list_frequencies(core_id)
else:
self._log.warning('Unable to identify cluster frequencies')
# TODO: get the performance boundaries in case of intel_pstate driver
self.platform['cpus_count'] = len(self.target.core_clusters)
def _load_em(self, board):
em_path = os.path.join(basepath,
'libs/utils/platforms', board.lower() + '.json')
self._log.debug('Trying to load default EM from %s', em_path)
if not os.path.exists(em_path):
return None
self._log.info('Loading default EM:')
self._log.info(' %s', em_path)
board = JsonConf(em_path)
board.load()
if 'nrg_model' not in board.json:
return None
return board.json['nrg_model']
def _load_board(self, board):
board_path = os.path.join(basepath,
'libs/utils/platforms', board.lower() + '.json')
self._log.debug('Trying to load board descriptor from %s', board_path)
if not os.path.exists(board_path):
return None
self._log.info('Loading board:')
self._log.info(' %s', board_path)
board = JsonConf(board_path)
board.load()
if 'board' not in board.json:
return None
return board.json['board']
def _get_clusters(self, core_names):
idx = 0
clusters = []
ids_map = { core_names[0] : 0 }
for name in core_names:
idx = ids_map.get(name, idx+1)
ids_map[name] = idx
clusters.append(idx)
return clusters
    def _init_platform(self):
        """Assemble self.platform: clusters/frequencies, energy model,
        topology, kernel version, ABI and OS information."""
        if 'bl' in self.target.modules:
            self._init_platform_bl()
        else:
            self._init_platform_smp()
        # Adding energy model information
        if 'nrg_model' in self.conf:
            self.platform['nrg_model'] = self.conf['nrg_model']
        # Try to load the default energy model (if available)
        else:
            self.platform['nrg_model'] = self._load_em(self.conf['board'])
        # Adding topology information
        self.platform['topology'] = self.topology.get_level("cluster")
        # Adding kernel build information
        kver = self.target.kernel_version
        # Missing attributes (e.g. on unparsable version strings) become None
        self.platform['kernel'] = {t: getattr(kver, t, None)
            for t in [
                'release', 'version',
                'version_number', 'major', 'minor',
                'rc', 'sha1', 'parts'
            ]
        }
        self.platform['abi'] = self.target.abi
        self.platform['os'] = self.target.os
        self._log.debug('Platform descriptor initialized\n%s', self.platform)
        # self.platform_dump('./')
def platform_dump(self, dest_dir, dest_file='platform.json'):
plt_file = os.path.join(dest_dir, dest_file)
self._log.debug('Dump platform descriptor in [%s]', plt_file)
with open(plt_file, 'w') as ofile:
json.dump(self.platform, ofile, sort_keys=True, indent=4)
return (self.platform, plt_file)
    def calibration(self, force=False):
        """
        Get rt-app calibration. Run calibration on target if necessary.

        :param force: Always run calibration on target, even if we have not
                      installed rt-app or have already run calibration.

        :returns: A dict with calibration results, which can be passed as the
                  ``calibration`` parameter to :class:`RTA`, or ``None`` if
                  force=False and we have not installed rt-app.
        """
        # Return cached values from a previous calibration, if any
        if not force and self._calib:
            return self._calib
        # Calibration is only needed when RT-App workloads will run
        required = force or 'rt-app' in self.__installed_tools
        if not required:
            self._log.debug('No RT-App workloads, skipping calibration')
            return
        if not force and 'rtapp-calib' in self.conf:
            self._log.warning('Using configuration provided RTApp calibration')
            # JSON keys are strings: convert to the int CPU ids RTA expects
            self._calib = {
                    int(key): int(value)
                    for key, value in self.conf['rtapp-calib'].items()
                }
        else:
            self._log.info('Calibrating RTApp...')
            self._calib = RTA.calibrate(self.target)
        # Log the values in a format that can be copied into target.config's
        # "rtapp-calib" entry to skip calibration on later runs
        self._log.info('Using RT-App calibration values:')
        self._log.info('   %s',
                       "{" + ", ".join('"%r": %r' % (key, self._calib[key])
                                       for key in sorted(self._calib)) + "}")
        return self._calib
    def resolv_host(self, host=None):
        """
        Resolve a host name or IP address to a MAC address

        .. TODO Is my networking terminology correct here?

        :param host: IP address or host name to resolve. If None, use 'host'
                    value from target_config.
        :type host: str
        :returns: a (macaddr, ipaddr) tuple from parse_arp_cache().
        """
        if host is None:
            host = self.conf['host']
        # Refresh ARP for local network IPs
        self._log.debug('Collecting all Bcast address')
        output = os.popen(r'ifconfig').read().split('\n')
        for line in output:
            match = IFCFG_BCAST_RE.search(line)
            if not match:
                continue
            baddr = match.group(1)
            try:
                # Ping-scan the subnet to populate the ARP cache.
                # NOTE(review): os.popen() does not wait for nmap/ping to
                # finish, so the ARP cache may still be incomplete when
                # parse_arp_cache() runs below — confirm this is intended.
                cmd = r'nmap -T4 -sP {}/24 &>/dev/null'.format(baddr.strip())
                self._log.debug(cmd)
                os.popen(cmd)
            except RuntimeError:
                # Fallback when nmap is not installed
                self._log.warning('Nmap not available, try IP lookup using broadcast ping')
                cmd = r'ping -b -c1 {} &>/dev/null'.format(baddr)
                self._log.debug(cmd)
                os.popen(cmd)
        return self.parse_arp_cache(host)
    def parse_arp_cache(self, host):
        """Look up `host` in the local ARP cache (`arp -n` output).

        :param host: a MAC address (detected by the presence of ':') or an
            IP address.
        :returns: a (macaddr, ipaddr) tuple; the MAC is 'UNKNOWN' when the
            cache has no hardware address for the IP (e.g. WiFi targets).
        :raises ValueError: when neither address can be determined.
        """
        # os.popen() returns a file-like object, iterated line by line below
        output = os.popen(r'arp -n')
        if ':' in host:
            # Assuming this is a MAC address
            # TODO add a suitable check on MAC address format
            # Query ARP for the specified HW address
            ARP_RE = re.compile(
                r'([^ ]*).*({}|{})'.format(host.lower(), host.upper())
            )
            macaddr = host
            ipaddr = None
            for line in output:
                match = ARP_RE.search(line)
                if not match:
                    continue
                ipaddr = match.group(1)
                break
        else:
            # Assuming this is an IP address
            # TODO add a suitable check on IP address format
            # Query ARP for the specified IP address
            ARP_RE = re.compile(
                r'{}.*ether *([0-9a-fA-F:]*)'.format(host)
            )
            macaddr = None
            ipaddr = host
            for line in output:
                match = ARP_RE.search(line)
                if not match:
                    continue
                macaddr = match.group(1)
                break
            else:
                # for/else: only runs when the loop completed with no break,
                # i.e. no ARP entry matched the IP.
                # When target is accessed via WiFi, there is not MAC address
                # reported by arp. In these cases we can know only the IP
                # of the remote target.
                macaddr = 'UNKNOWN'
        if not ipaddr or not macaddr:
            raise ValueError('Unable to lookup for target IP/MAC address')
        self._log.info('Target (%s) at IP address: %s', macaddr, ipaddr)
        return (macaddr, ipaddr)
    def reboot(self, reboot_time=120, ping_time=15):
        """
        Reboot target.

        :param reboot_time: Time to wait for the target to become available
                            after reboot before declaring failure.
        :param ping_time: Period between attempts to ping the target while
                          waiting for reboot.
        """
        # Send remote target a reboot command
        if self._feature('no-reboot'):
            self._log.warning('Reboot disabled by conf features')
        else:
            # Config-provided timings override the method defaults
            if 'reboot_time' in self.conf:
                reboot_time = int(self.conf['reboot_time'])
            if 'ping_time' in self.conf:
                ping_time = int(self.conf['ping_time'])
            # Before rebooting make sure to have IP and MAC addresses
            # of the target
            (self.mac, self.ip) = self.parse_arp_cache(self.ip)
            self.target.execute('sleep 2 && reboot -f &', as_root=True)
            # Wait for the target to complete the reboot
            self._log.info('Waiting up to %s[s] for target [%s] to reboot...',
                           reboot_time, self.ip)
            ping_cmd = "ping -c 1 {} >/dev/null".format(self.ip)
            elapsed = 0
            start = time.time()
            while elapsed <= reboot_time:
                time.sleep(ping_time)
                self._log.debug('Trying to connect to [%s] target...', self.ip)
                if os.system(ping_cmd) == 0:
                    break
                elapsed = time.time() - start
            if elapsed > reboot_time:
                # Timed out: the target may have changed IP across the
                # reboot; try resolving it again from its MAC address
                if self.mac:
                    self._log.warning('target [%s] not responding to PINGs, '
                                      'trying to resolve MAC address...',
                                      self.ip)
                    (self.mac, self.ip) = self.resolv_host(self.mac)
                else:
                    self._log.warning('target [%s] not responding to PINGs, '
                                      'trying to continue...',
                                      self.ip)
        # Force re-initialization of all the devlib modules
        force = True
        # Reset the connection to the target
        self._init(force)
        # Initialize FTrace events collection
        self._init_ftrace(force)
        # Initialize energy probe instrument
        self._init_energy(force)
def install_kernel(self, tc, reboot=False):
"""
Deploy kernel and DTB via TFTP, optionally rebooting
:param tc: Dicionary containing optional keys 'kernel' and 'dtb'. Values
are paths to the binaries to deploy.
:type tc: dict
:param reboot: Reboot thet target after deployment
:type reboot: bool
"""
# Default initialize the kernel/dtb settings
tc.setdefault('kernel', None)
tc.setdefault('dtb', None)
if self.kernel == tc['kernel'] and self.dtb == tc['dtb']:
return
self._log.info('Install kernel [%s] on target...', tc['kernel'])
# Install kernel/dtb via FTFP
if self._feature('no-kernel'):
self._log.warning('Kernel deploy disabled by conf features')
elif 'tftp' in self.conf:
self._log.info('Deploy kernel via TFTP...')
# Deploy kernel in TFTP folder (mandatory)
if 'kernel' not in tc or not tc['kernel']:
raise ValueError('Missing "kernel" parameter in conf: %s',
'KernelSetup', tc)
self.tftp_deploy(tc['kernel'])
# Deploy DTB in TFTP folder (if provided)
if 'dtb' not in tc or not tc['dtb']:
self._log.debug('DTB not provided, using existing one')
self._log.debug('Current conf:\n%s', tc)
self._log.warning('Using pre-installed DTB')
else:
self.tftp_deploy(tc['dtb'])
else:
raise ValueError('Kernel installation method not supported')
# Keep track of last installed kernel
self.kernel = tc['kernel']
if 'dtb' in tc:
self.dtb = tc['dtb']
if not reboot:
return
# Reboot target
self._log.info('Rebooting taget...')
self.reboot()
def tftp_deploy(self, src):
"""
.. TODO
"""
tftp = self.conf['tftp']
dst = tftp['folder']
if 'kernel' in src:
dst = os.path.join(dst, tftp['kernel'])
elif 'dtb' in src:
dst = os.path.join(dst, tftp['dtb'])
else:
dst = os.path.join(dst, os.path.basename(src))
cmd = 'cp {} {} && sync'.format(src, dst)
self._log.info('Deploy %s into %s', src, dst)
result = os.system(cmd)
if result != 0:
self._log.error('Failed to deploy image: %s', src)
raise ValueError('copy error')
    def _feature(self, feature):
        """Return True if `feature` (e.g. 'no-reboot', 'no-kernel') is
        listed in the target config's __features__ list."""
        return feature in self.conf['__features__']
# Matches the broadcast address in `ifconfig` output ("Bcast:<addr> ").
# NOTE(review): this is the net-tools output format; newer ifconfig and
# `ip addr` print "broadcast" instead — confirm against the distros this
# runs on.
IFCFG_BCAST_RE = re.compile(
    r'Bcast:(.*) '
)
# vim :set tabstop=4 shiftwidth=4 expandtab
| 37.274038 | 114 | 0.559551 |
import datetime
import json
import logging
import os
import re
import shutil
import sys
import time
import unittest
import devlib
from devlib.utils.misc import memoized
from devlib import Platform, TargetError
from trappy.stats.Topology import Topology
from wlgen import RTA
from energy import EnergyMeter
from energy_model import EnergyModel
from conf import JsonConf
from platforms.juno_energy import juno_energy
from platforms.hikey_energy import hikey_energy
from platforms.pixel_energy import pixel_energy
USERNAME_DEFAULT = 'root'
PASSWORD_DEFAULT = ''
WORKING_DIR_DEFAULT = '/data/local/schedtest'
FTRACE_EVENTS_DEFAULT = ['sched:*']
FTRACE_BUFSIZE_DEFAULT = 10240
OUT_PREFIX = 'results'
LATEST_LINK = 'results_latest'
basepath = os.path.dirname(os.path.realpath(__file__))
basepath = basepath.replace('/libs/utils', '')
def os_which(file):
    """Return the first full path of `file` found on $PATH, or None."""
    candidates = (os.path.join(directory, file)
                  for directory in os.environ["PATH"].split(os.pathsep))
    return next((candidate for candidate in candidates
                 if os.path.exists(candidate)), None)
class ShareState(object):
    """Borg-style base class: every instance shares one attribute dict."""
    _shared_state = {}

    def __init__(self):
        # Point this instance's __dict__ at the class-wide dict so any
        # attribute written on one instance is visible on all others.
        self.__dict__ = self._shared_state
class TestEnv(ShareState):
_initialized = False
def __init__(self, target_conf=None, test_conf=None, wipe=True,
force_new=False):
super(TestEnv, self).__init__()
if self._initialized and not force_new:
return
self.conf = {}
self.test_conf = {}
self.target = None
self.ftrace = None
self.workdir = WORKING_DIR_DEFAULT
self.__installed_tools = set()
self.__modules = []
self.__connection_settings = None
self._calib = None
self.ip = None
self.mac = None
self.kernel = None
self.dtb = None
self.emeter = None
self.platform = {}
self.LISA_HOME = os.environ.get('LISA_HOME', '/vagrant')
self.ANDROID_HOME = os.environ.get('ANDROID_HOME', None)
self.CATAPULT_HOME = os.environ.get('CATAPULT_HOME',
os.path.join(self.LISA_HOME, 'tools', 'catapult'))
self._log = logging.getLogger('TestEnv')
self._log.info('Using base path: %s', basepath)
if isinstance(target_conf, dict):
self._log.info('Loading custom (inline) target configuration')
self.conf = target_conf
elif isinstance(target_conf, str):
self._log.info('Loading custom (file) target configuration')
self.conf = self.loadTargetConfig(target_conf)
elif target_conf is None:
self._log.info('Loading default (file) target configuration')
self.conf = self.loadTargetConfig()
self._log.debug('Target configuration %s', self.conf)
if test_conf:
if isinstance(test_conf, dict):
self._log.info('Loading custom (inline) test configuration')
self.test_conf = test_conf
elif isinstance(test_conf, str):
self._log.info('Loading custom (file) test configuration')
self.test_conf = self.loadTargetConfig(test_conf)
else:
raise ValueError('test_conf must be either a dictionary or a filepath')
self._log.debug('Test configuration %s', self.conf)
if 'workdir' in self.conf:
self.workdir = self.conf['workdir']
test_conf_tools = self.test_conf.get('tools', [])
target_conf_tools = self.conf.get('tools', [])
self.__tools = list(set(test_conf_tools + target_conf_tools))
if 'ftrace' in self.test_conf:
self.conf['ftrace'] = self.test_conf['ftrace']
if self.conf.get('ftrace'):
self.__tools.append('trace-cmd')
if '__features__' not in self.conf:
self.conf['__features__'] = []
self._init()
self._init_ftrace(True)
self.calibration()
self.res_dir = (self.test_conf.get('results_dir') or
self.conf.get('results_dir'))
if self.res_dir and not os.path.isabs(self.res_dir):
self.res_dir = os.path.join(basepath, 'results', self.res_dir)
else:
self.res_dir = os.path.join(basepath, OUT_PREFIX)
self.res_dir = datetime.datetime.now()\
.strftime(self.res_dir + '/%Y%m%d_%H%M%S')
if wipe and os.path.exists(self.res_dir):
self._log.warning('Wipe previous contents of the results folder:')
self._log.warning(' %s', self.res_dir)
shutil.rmtree(self.res_dir, ignore_errors=True)
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
res_lnk = os.path.join(basepath, LATEST_LINK)
if os.path.islink(res_lnk):
os.remove(res_lnk)
os.symlink(self.res_dir, res_lnk)
self._init_energy(True)
self._log.info('Set results folder to:')
self._log.info(' %s', self.res_dir)
self._log.info('Experiment results available also in:')
self._log.info(' %s', res_lnk)
self._initialized = True
def loadTargetConfig(self, filepath='target.config'):
conf_file = os.path.join(basepath, filepath)
self._log.info('Loading target configuration [%s]...', conf_file)
conf = JsonConf(conf_file)
conf.load()
return conf.json
def _init(self, force = False):
self._init_target(force)
CLUSTERS = []
if self.target.big_core and \
(self.target.abi == 'arm64' or self.target.abi == 'armeabi'):
if self.target.big_core:
CLUSTERS.append(
[i for i,t in enumerate(self.target.core_names)
if t == self.target.little_core])
CLUSTERS.append(
[i for i,t in enumerate(self.target.core_names)
if t == self.target.big_core])
elif not self.target.big_core or \
self.target.abi == 'x86_64':
for c in set(self.target.core_clusters):
CLUSTERS.append(
[i for i,v in enumerate(self.target.core_clusters)
if v == c])
self.topology = Topology(clusters=CLUSTERS)
self._log.info('Topology:')
self._log.info(' %s', CLUSTERS)
self._init_platform()
def _init_target(self, force = False):
if not force and self.target is not None:
return self.target
self.__connection_settings = {}
if 'username' in self.conf:
self.__connection_settings['username'] = self.conf['username']
else:
self.__connection_settings['username'] = USERNAME_DEFAULT
if 'keyfile' in self.conf:
self.__connection_settings['keyfile'] = self.conf['keyfile']
elif 'password' in self.conf:
self.__connection_settings['password'] = self.conf['password']
else:
self.__connection_settings['password'] = PASSWORD_DEFAULT
if 'port' in self.conf:
self.__connection_settings['port'] = self.conf['port']
if 'host' in self.conf:
try:
if ':' in self.conf['host']:
(self.mac, self.ip) = self.resolv_host(self.conf['host'])
else:
self.ip = self.conf['host']
self.__connection_settings['host'] = self.ip
except KeyError:
raise ValueError('Config error: missing [host] parameter')
try:
platform_type = self.conf['platform']
except KeyError:
raise ValueError('Config error: missing [platform] parameter')
if platform_type.lower() == 'android':
self.ANDROID_HOME = self.conf.get('ANDROID_HOME',
self.ANDROID_HOME)
if self.ANDROID_HOME:
self._adb = os.path.join(self.ANDROID_HOME,
'platform-tools', 'adb')
self._fastboot = os.path.join(self.ANDROID_HOME,
'platform-tools', 'fastboot')
os.environ['ANDROID_HOME'] = self.ANDROID_HOME
os.environ['CATAPULT_HOME'] = self.CATAPULT_HOME
else:
self._log.info('Android SDK not found as ANDROID_HOME not defined, using PATH for platform tools')
self._adb = os_which('adb')
self._fastboot = os_which('fastboot')
if self._adb:
self._log.info('Using adb from ' + self._adb)
if self._fastboot:
self._log.info('Using fastboot from ' + self._fastboot)
self._log.info('External tools using:')
self._log.info(' ANDROID_HOME: %s', self.ANDROID_HOME)
self._log.info(' CATAPULT_HOME: %s', self.CATAPULT_HOME)
if not os.path.exists(self._adb):
raise RuntimeError('\nADB binary not found\n\t{}\ndoes not exists!\n\n'
'Please configure ANDROID_HOME to point to '
'a valid Android SDK installation folder.'\
.format(self._adb))
tch:
continue
macaddr = match.group(1)
break
else:
# When target is accessed via WiFi, there is not MAC address
# reported by arp. In these cases we can know only the IP
# of the remote target.
macaddr = 'UNKNOWN'
if not ipaddr or not macaddr:
raise ValueError('Unable to lookup for target IP/MAC address')
self._log.info('Target (%s) at IP address: %s', macaddr, ipaddr)
return (macaddr, ipaddr)
def reboot(self, reboot_time=120, ping_time=15):
# Send remote target a reboot command
if self._feature('no-reboot'):
self._log.warning('Reboot disabled by conf features')
else:
if 'reboot_time' in self.conf:
reboot_time = int(self.conf['reboot_time'])
if 'ping_time' in self.conf:
ping_time = int(self.conf['ping_time'])
# Before rebooting make sure to have IP and MAC addresses
# of the target
(self.mac, self.ip) = self.parse_arp_cache(self.ip)
self.target.execute('sleep 2 && reboot -f &', as_root=True)
# Wait for the target to complete the reboot
self._log.info('Waiting up to %s[s] for target [%s] to reboot...',
reboot_time, self.ip)
ping_cmd = "ping -c 1 {} >/dev/null".format(self.ip)
elapsed = 0
start = time.time()
while elapsed <= reboot_time:
time.sleep(ping_time)
self._log.debug('Trying to connect to [%s] target...', self.ip)
if os.system(ping_cmd) == 0:
break
elapsed = time.time() - start
if elapsed > reboot_time:
if self.mac:
self._log.warning('target [%s] not responding to PINGs, '
'trying to resolve MAC address...',
self.ip)
(self.mac, self.ip) = self.resolv_host(self.mac)
else:
self._log.warning('target [%s] not responding to PINGs, '
'trying to continue...',
self.ip)
# Force re-initialization of all the devlib modules
force = True
# Reset the connection to the target
self._init(force)
# Initialize FTrace events collection
self._init_ftrace(force)
# Initialize energy probe instrument
self._init_energy(force)
def install_kernel(self, tc, reboot=False):
# Default initialize the kernel/dtb settings
tc.setdefault('kernel', None)
tc.setdefault('dtb', None)
if self.kernel == tc['kernel'] and self.dtb == tc['dtb']:
return
self._log.info('Install kernel [%s] on target...', tc['kernel'])
# Install kernel/dtb via FTFP
if self._feature('no-kernel'):
self._log.warning('Kernel deploy disabled by conf features')
elif 'tftp' in self.conf:
self._log.info('Deploy kernel via TFTP...')
# Deploy kernel in TFTP folder (mandatory)
if 'kernel' not in tc or not tc['kernel']:
raise ValueError('Missing "kernel" parameter in conf: %s',
'KernelSetup', tc)
self.tftp_deploy(tc['kernel'])
# Deploy DTB in TFTP folder (if provided)
if 'dtb' not in tc or not tc['dtb']:
self._log.debug('DTB not provided, using existing one')
self._log.debug('Current conf:\n%s', tc)
self._log.warning('Using pre-installed DTB')
else:
self.tftp_deploy(tc['dtb'])
else:
raise ValueError('Kernel installation method not supported')
# Keep track of last installed kernel
self.kernel = tc['kernel']
if 'dtb' in tc:
self.dtb = tc['dtb']
if not reboot:
return
# Reboot target
self._log.info('Rebooting taget...')
self.reboot()
def tftp_deploy(self, src):
tftp = self.conf['tftp']
dst = tftp['folder']
if 'kernel' in src:
dst = os.path.join(dst, tftp['kernel'])
elif 'dtb' in src:
dst = os.path.join(dst, tftp['dtb'])
else:
dst = os.path.join(dst, os.path.basename(src))
cmd = 'cp {} {} && sync'.format(src, dst)
self._log.info('Deploy %s into %s', src, dst)
result = os.system(cmd)
if result != 0:
self._log.error('Failed to deploy image: %s', src)
raise ValueError('copy error')
def _feature(self, feature):
return feature in self.conf['__features__']
IFCFG_BCAST_RE = re.compile(
r'Bcast:(.*) '
)
# vim :set tabstop=4 shiftwidth=4 expandtab
| true | true |
f7fc597cf60e169ead5aa52a321aafdec4e6ab17 | 5,263 | py | Python | Lib/site-packages/pytz/lazy.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | Lib/site-packages/pytz/lazy.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | Lib/site-packages/pytz/lazy.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
class LazyList(list):
"""List populated on first use."""
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
# We need a new class as we will be dynamically messing with its
# methods.
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
class LazySet(set):
"""Set populated on first use."""
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| 31.142012 | 75 | 0.534296 | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
_fill_lock = RLock()
class LazyDict(DictMixin):
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
class LazyList(list):
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
class LazySet(set):
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| true | true |
f7fc5a0780d1c180220fb33393752c42353aee2c | 4,957 | py | Python | pcd8544_fb.py | mbaser/micropython-pcd8544 | 791d4239d77b0d06192c7ab7903d81a72a53f992 | [
"MIT"
] | 46 | 2017-08-03T15:33:26.000Z | 2022-02-24T04:00:31.000Z | pcd8544_fb.py | mbaser/micropython-pcd8544 | 791d4239d77b0d06192c7ab7903d81a72a53f992 | [
"MIT"
] | 3 | 2018-08-20T23:42:26.000Z | 2021-11-01T13:07:35.000Z | pcd8544_fb.py | mcauser/micropython-pcd8544 | 791d4239d77b0d06192c7ab7903d81a72a53f992 | [
"MIT"
] | 12 | 2017-07-18T20:17:18.000Z | 2022-02-24T02:32:40.000Z | """
MicroPython Nokia 5110 PCD8544 84x48 LCD driver
https://github.com/mcauser/micropython-pcd8544
MIT License
Copyright (c) 2016-2018 Mike Causer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from micropython import const
from ustruct import pack
from utime import sleep_us
import framebuf
# Function set 0010 0xxx
FUNCTION_SET = const(0x20)
POWER_DOWN = const(0x04)
ADDRESSING_VERT = const(0x02)
EXTENDED_INSTR = const(0x01)
# Display control 0000 1x0x
DISPLAY_BLANK = const(0x08)
DISPLAY_ALL = const(0x09)
DISPLAY_NORMAL = const(0x0c)
DISPLAY_INVERSE = const(0x0d)
# Temperature control 0000 01xx
TEMP_COEFF_0 = const(0x04)
TEMP_COEFF_1 = const(0x05)
TEMP_COEFF_2 = const(0x06) # default
TEMP_COEFF_3 = const(0x07)
# Bias system 0001 0xxx
BIAS_1_100 = const(0x10)
BIAS_1_80 = const(0x11)
BIAS_1_65 = const(0x12)
BIAS_1_48 = const(0x13)
BIAS_1_40 = const(0x14) # default
BIAS_1_24 = const(0x15)
BIAS_1_18 = const(0x16)
BIAS_1_10 = const(0x17)
# Set operation voltage
SET_VOP = const(0x80)
# DDRAM addresses
COL_ADDR = const(0x80) # x pos (0~83)
BANK_ADDR = const(0x40) # y pos, in banks of 8 rows (0~5)
# Display dimensions
WIDTH = const(0x54) # 84
HEIGHT = const(0x30) # 48
class PCD8544_FB(framebuf.FrameBuffer):
def __init__(self, spi, cs, dc, rst=None):
self.spi = spi
self.cs = cs # chip enable, active LOW
self.dc = dc # data HIGH, command LOW
self.rst = rst # reset, active LOW
self.height = HEIGHT # For Writer class
self.width = WIDTH
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
if self.rst:
self.rst.init(self.rst.OUT, value=1)
self.buf = bytearray((HEIGHT // 8) * WIDTH)
super().__init__(self.buf, WIDTH, HEIGHT, framebuf.MONO_VLSB)
self.reset()
self.init()
def init(self, horizontal=True, contrast=0x3f, bias=BIAS_1_40, temp=TEMP_COEFF_2):
# power up, horizontal addressing, basic instruction set
self.fn = FUNCTION_SET
self.addressing(horizontal)
self.contrast(contrast, bias, temp)
self.cmd(DISPLAY_NORMAL)
self.clear()
def reset(self):
# issue reset impulse to reset the display
# you need to call power_on() or init() to resume
self.rst(1)
sleep_us(100)
self.rst(0)
sleep_us(100) # reset impulse has to be >100 ns and <100 ms
self.rst(1)
sleep_us(100)
def power_on(self):
self.cs(1)
self.fn &= ~POWER_DOWN
self.cmd(self.fn)
def power_off(self):
self.fn |= POWER_DOWN
self.cmd(self.fn)
def contrast(self, contrast=0x3f, bias=BIAS_1_40, temp=TEMP_COEFF_2):
for cmd in (
# extended instruction set is required to set temp, bias and vop
self.fn | EXTENDED_INSTR,
# set temperature coefficient
temp,
# set bias system (n=3 recommended mux rate 1:40/1:34)
bias,
# set contrast with operating voltage (0x00~0x7f)
# 0x00 = 3.00V, 0x3f = 6.84V, 0x7f = 10.68V
# starting at 3.06V, each bit increments voltage by 0.06V at room temperature
SET_VOP | contrast,
# revert to basic instruction set
self.fn & ~EXTENDED_INSTR):
self.cmd(cmd)
def invert(self, invert):
self.cmd(DISPLAY_INVERSE if invert else DISPLAY_NORMAL)
def clear(self):
# clear DDRAM, reset x,y position to 0,0
self.data([0] * (HEIGHT * WIDTH // 8))
self.position(0, 0)
def addressing(self, horizontal=True):
# vertical or horizontal addressing
if horizontal:
self.fn &= ~ADDRESSING_VERT
else:
self.fn |= ADDRESSING_VERT
self.cmd(self.fn)
def position(self, x, y):
# set cursor to column x (0~83), bank y (0~5)
self.cmd(COL_ADDR | x) # set x pos (0~83)
self.cmd(BANK_ADDR | y) # set y pos (0~5)
def cmd(self, command):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
def data(self, data):
self.dc(1)
self.cs(0)
self.spi.write(pack('B'*len(data), *data))
self.cs(1)
def show(self):
self.data(self.buf)
| 28.988304 | 83 | 0.701231 |
from micropython import const
from ustruct import pack
from utime import sleep_us
import framebuf
FUNCTION_SET = const(0x20)
POWER_DOWN = const(0x04)
ADDRESSING_VERT = const(0x02)
EXTENDED_INSTR = const(0x01)
DISPLAY_BLANK = const(0x08)
DISPLAY_ALL = const(0x09)
DISPLAY_NORMAL = const(0x0c)
DISPLAY_INVERSE = const(0x0d)
TEMP_COEFF_0 = const(0x04)
TEMP_COEFF_1 = const(0x05)
TEMP_COEFF_2 = const(0x06)
TEMP_COEFF_3 = const(0x07)
BIAS_1_100 = const(0x10)
BIAS_1_80 = const(0x11)
BIAS_1_65 = const(0x12)
BIAS_1_48 = const(0x13)
BIAS_1_40 = const(0x14)
BIAS_1_24 = const(0x15)
BIAS_1_18 = const(0x16)
BIAS_1_10 = const(0x17)
SET_VOP = const(0x80)
COL_ADDR = const(0x80)
BANK_ADDR = const(0x40)
WIDTH = const(0x54)
HEIGHT = const(0x30)
class PCD8544_FB(framebuf.FrameBuffer):
def __init__(self, spi, cs, dc, rst=None):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.height = HEIGHT
self.width = WIDTH
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
if self.rst:
self.rst.init(self.rst.OUT, value=1)
self.buf = bytearray((HEIGHT // 8) * WIDTH)
super().__init__(self.buf, WIDTH, HEIGHT, framebuf.MONO_VLSB)
self.reset()
self.init()
def init(self, horizontal=True, contrast=0x3f, bias=BIAS_1_40, temp=TEMP_COEFF_2):
self.fn = FUNCTION_SET
self.addressing(horizontal)
self.contrast(contrast, bias, temp)
self.cmd(DISPLAY_NORMAL)
self.clear()
def reset(self):
self.rst(1)
sleep_us(100)
self.rst(0)
sleep_us(100)
self.rst(1)
sleep_us(100)
def power_on(self):
self.cs(1)
self.fn &= ~POWER_DOWN
self.cmd(self.fn)
def power_off(self):
self.fn |= POWER_DOWN
self.cmd(self.fn)
def contrast(self, contrast=0x3f, bias=BIAS_1_40, temp=TEMP_COEFF_2):
for cmd in (
self.fn | EXTENDED_INSTR,
temp,
bias,
SET_VOP | contrast,
self.fn & ~EXTENDED_INSTR):
self.cmd(cmd)
def invert(self, invert):
self.cmd(DISPLAY_INVERSE if invert else DISPLAY_NORMAL)
def clear(self):
self.data([0] * (HEIGHT * WIDTH // 8))
self.position(0, 0)
def addressing(self, horizontal=True):
if horizontal:
self.fn &= ~ADDRESSING_VERT
else:
self.fn |= ADDRESSING_VERT
self.cmd(self.fn)
def position(self, x, y):
self.cmd(COL_ADDR | x)
self.cmd(BANK_ADDR | y)
def cmd(self, command):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
def data(self, data):
self.dc(1)
self.cs(0)
self.spi.write(pack('B'*len(data), *data))
self.cs(1)
def show(self):
self.data(self.buf)
| true | true |
f7fc5a4829d2c5c3e2d1cfdd48692904f0fe3c3d | 521 | py | Python | python_algorithom/binary-search.py | Andrewpqc/Python_Ex | 6f1770f2c209d4510696bcbca92ee9f31b9d169b | [
"MIT"
] | null | null | null | python_algorithom/binary-search.py | Andrewpqc/Python_Ex | 6f1770f2c209d4510696bcbca92ee9f31b9d169b | [
"MIT"
] | null | null | null | python_algorithom/binary-search.py | Andrewpqc/Python_Ex | 6f1770f2c209d4510696bcbca92ee9f31b9d169b | [
"MIT"
] | null | null | null | """
二分查找
"""
def binary_search(list, item):
"""
list:有序列表(从小到大排列)
item:需要查找的元素
"""
low = 0
high = len(list) - 1
while low < high:
mid = (low + high) // 2
if item == list[mid]:
return mid
elif item > list[mid]: #查找值位于mid右边
low = mid + 1
else:
high = mid - 1 #查找值位于mid左边
return None #没找到则返回None
if __name__ == "__main__":
l = [1, 2, 3, 4, 5, 6]
print(binary_search(l, 3))
print(binary_search(l, 7))
| 17.965517 | 43 | 0.485605 |
def binary_search(list, item):
low = 0
high = len(list) - 1
while low < high:
mid = (low + high) // 2
if item == list[mid]:
return mid
elif item > list[mid]:
low = mid + 1
else:
high = mid - 1
return None
if __name__ == "__main__":
l = [1, 2, 3, 4, 5, 6]
print(binary_search(l, 3))
print(binary_search(l, 7))
| true | true |
f7fc5a9bde43e185bd4736081242be419b42f393 | 1,026 | py | Python | selfdrive/messaging_arne/messaging_pyx_setup.py | 1Thamer/openpilot0.6.6 | e31f5a864870d22cf62c0ef298598dbe96bfb3a0 | [
"MIT"
] | 1 | 2019-12-25T17:59:05.000Z | 2019-12-25T17:59:05.000Z | selfdrive/messaging_arne/messaging_pyx_setup.py | 1Thamer/openpilot0.6.6 | e31f5a864870d22cf62c0ef298598dbe96bfb3a0 | [
"MIT"
] | null | null | null | selfdrive/messaging_arne/messaging_pyx_setup.py | 1Thamer/openpilot0.6.6 | e31f5a864870d22cf62c0ef298598dbe96bfb3a0 | [
"MIT"
] | null | null | null | import os
import subprocess
from distutils.core import Extension, setup # pylint: disable=import-error,no-name-in-module
from Cython.Build import cythonize
from common.basedir import BASEDIR
from common.cython_hacks import BuildExtWithoutPlatformSuffix
sourcefiles = ['messaging_pyx.pyx']
extra_compile_args = ["-std=c++11"]
libraries = []
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip() # pylint: disable=unexpected-keyword-arg
if ARCH == "aarch64":
extra_compile_args += ["-Wno-deprecated-register"]
libraries += ['gnustl_shared']
setup(name='CAN parser',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(
Extension(
"messaging_pyx",
language="c++",
sources=sourcefiles,
extra_compile_args=extra_compile_args,
libraries=libraries,
extra_objects=[
os.path.join(BASEDIR, 'selfdrive', 'messaging_arne', 'messaging.a'),
]
)
),
nthreads=4,
)
| 29.314286 | 115 | 0.674464 | import os
import subprocess
from distutils.core import Extension, setup
from Cython.Build import cythonize
from common.basedir import BASEDIR
from common.cython_hacks import BuildExtWithoutPlatformSuffix
sourcefiles = ['messaging_pyx.pyx']
extra_compile_args = ["-std=c++11"]
libraries = []
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
if ARCH == "aarch64":
extra_compile_args += ["-Wno-deprecated-register"]
libraries += ['gnustl_shared']
setup(name='CAN parser',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(
Extension(
"messaging_pyx",
language="c++",
sources=sourcefiles,
extra_compile_args=extra_compile_args,
libraries=libraries,
extra_objects=[
os.path.join(BASEDIR, 'selfdrive', 'messaging_arne', 'messaging.a'),
]
)
),
nthreads=4,
)
| true | true |
f7fc5aae452fb92c82be02ab42e3ae71146bf451 | 969 | py | Python | 2015/Source/Day1.py | akshayupendran/AdventOfCodeInPython | 8f9ac317258e8286f11e9ccc7d654a1a16307860 | [
"MIT"
] | null | null | null | 2015/Source/Day1.py | akshayupendran/AdventOfCodeInPython | 8f9ac317258e8286f11e9ccc7d654a1a16307860 | [
"MIT"
] | null | null | null | 2015/Source/Day1.py | akshayupendran/AdventOfCodeInPython | 8f9ac317258e8286f11e9ccc7d654a1a16307860 | [
"MIT"
] | null | null | null | # Imports
import os # Need OS for checking if file exists
# Global Variables
# InputFile = None
if __name__ == '__main__':
# global InputFile
Path2File = '.\\..\\Inputs\\Day1_Input.txt'
EnablePartTwo = 0
FloorNumber = 0
CharacterPosition = 0
if os.path.exists(Path2File):
InputFile = open(Path2File, mode='r', encoding='utf-8', errors='strict')
a = InputFile.read()
for i in a:
if EnablePartTwo == 1:
CharacterPosition = CharacterPosition + 1
if i == '(':
FloorNumber = FloorNumber+1
elif i == ')':
FloorNumber = FloorNumber-1;
else:
print("Error Detected")
exit(1)
if EnablePartTwo == 1:
if FloorNumber < 0:
print(CharacterPosition)
exit(0)
print(FloorNumber)
else:
print('Could not find input file')
| 28.5 | 80 | 0.52322 |
import os
if __name__ == '__main__':
Path2File = '.\\..\\Inputs\\Day1_Input.txt'
EnablePartTwo = 0
FloorNumber = 0
CharacterPosition = 0
if os.path.exists(Path2File):
InputFile = open(Path2File, mode='r', encoding='utf-8', errors='strict')
a = InputFile.read()
for i in a:
if EnablePartTwo == 1:
CharacterPosition = CharacterPosition + 1
if i == '(':
FloorNumber = FloorNumber+1
elif i == ')':
FloorNumber = FloorNumber-1;
else:
print("Error Detected")
exit(1)
if EnablePartTwo == 1:
if FloorNumber < 0:
print(CharacterPosition)
exit(0)
print(FloorNumber)
else:
print('Could not find input file')
| true | true |
f7fc5bd172eb880e2a57ae66b2cc0fdd5f42d2bc | 610 | py | Python | homeworks/homework2/src/binomial.py | luizirber/cse891-parallel-computing | 9f14d97de655ee9175b6298421d2db69bc8352ea | [
"BSD-3-Clause"
] | 2 | 2015-06-10T20:58:59.000Z | 2015-11-05T14:51:36.000Z | homeworks/homework2/src/binomial.py | luizirber/cse891-parallel-computing | 9f14d97de655ee9175b6298421d2db69bc8352ea | [
"BSD-3-Clause"
] | null | null | null | homeworks/homework2/src/binomial.py | luizirber/cse891-parallel-computing | 9f14d97de655ee9175b6298421d2db69bc8352ea | [
"BSD-3-Clause"
] | null | null | null | import math
import sys
procs = int(sys.argv[1])
m = int(math.log(procs, 2))
sender = 0
receiver = 0
for i in range(1, m + 1):
for rank in range(procs):
sender = 0
receiver = 0
divider = 2 << (i - 1) #pow(2, i)
offset = divider >> 1 # divider / 2
if (rank % divider == 0):
receiver = 1
partner = rank + offset
elif (rank % divider == offset):
sender = 1
partner = rank - offset
if sender:
print rank, "->", partner
elif receiver:
print rank, "<-", partner
print
| 21.034483 | 43 | 0.485246 | import math
import sys
# NOTE(review): Python 2 code (bare `print` statements); `math` appears to be
# imported on the preceding (metadata-fused) line of this dump.
procs = int(sys.argv[1])  # total rank count, assumed to be a power of two
m = int(math.log(procs, 2))  # number of communication rounds = log2(procs)
sender = 0
receiver = 0
# Binomial-tree pairing: in round i, ranks divisible by 2**i pair with the
# rank 2**(i-1) above them; each pair prints once from the sender side and
# once from the receiver side, in rank order.
for i in range(1, m + 1):
    for rank in range(procs):
        sender = 0
        receiver = 0
        divider = 2 << (i - 1)  # == 2**i
        offset = divider >> 1  # == divider // 2
        if (rank % divider == 0):
            receiver = 1
            partner = rank + offset
        elif (rank % divider == offset):
            sender = 1
            partner = rank - offset
        if sender:
            print rank, "->", partner
        elif receiver:
            print rank, "<-", partner
    print  # blank line separates rounds
| false | true |
f7fc5c855213598c594a98071c7bfcdb77768078 | 13,986 | py | Python | tests/python/unittest/onnx/test_node.py | MoisesHer/incubator-mxnet | 73d1b055d04a0f0f511a9cc2dd46ae2eb03a8628 | [
"BSL-1.0",
"Apache-2.0"
] | 1 | 2019-02-22T13:53:48.000Z | 2019-02-22T13:53:48.000Z | tests/python/unittest/onnx/test_node.py | MoisesHer/incubator-mxnet | 73d1b055d04a0f0f511a9cc2dd46ae2eb03a8628 | [
"BSL-1.0",
"Apache-2.0"
] | 1 | 2020-08-27T06:39:07.000Z | 2020-08-31T03:29:27.000Z | tests/python/unittest/onnx/test_node.py | MoisesHer/incubator-mxnet | 73d1b055d04a0f0f511a9cc2dd46ae2eb03a8628 | [
"BSL-1.0",
"Apache-2.0"
] | 1 | 2020-08-14T22:56:19.000Z | 2020-08-14T22:56:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests for individual operators
This module contains operator tests which currently do not exist on
ONNX backend test framework. Once we have PRs on the ONNX repo and get
those PRs merged, this file will get EOL'ed.
"""
# pylint: disable=too-many-locals,wrong-import-position,import-error
from __future__ import absolute_import
import sys
import os
import unittest
import logging
import tarfile
from collections import namedtuple
import numpy as np
import numpy.testing as npt
from onnx import checker, numpy_helper, helper, load_model
from onnx import TensorProto
from mxnet.test_utils import download
from mxnet.contrib import onnx as onnx_mxnet
import mxnet as mx
import backend
# Resolve this file's directory so the repo's unittest helpers are importable
# regardless of the working directory the test runner was launched from.
CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(CURR_PATH, '../../python/unittest'))
# Debug-level root logger: operator round-trip failures are easier to triage.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def get_rnd(shape, low=-1.0, high=1.0, dtype=np.float32):
    """Return a random ndarray of *shape*, always cast to float32.

    *dtype* selects the sampling scheme, not the returned dtype:
      - np.float32: uniform floats in [low, high)
      - np.int32:   integers in [low, high) cast to float32
      - np.bool_:   values drawn from {False, True} (low/high ignored)

    Raises ValueError for any other dtype; the original silently fell
    through and returned None, which surfaced later as a confusing error.
    """
    if dtype == np.float32:
        return (np.random.uniform(low, high,
                                  np.prod(shape)).reshape(shape).astype(np.float32))
    if dtype == np.int32:
        return (np.random.randint(low, high,
                                  np.prod(shape)).reshape(shape).astype(np.float32))
    if dtype == np.bool_:
        return np.random.choice(a=[False, True], size=shape).astype(np.float32)
    raise ValueError("get_rnd: unsupported dtype %r" % (dtype,))
def _fix_attributes(attrs, attribute_mapping):
    """Translate MXNet attribute names/values to their ONNX equivalents.

    *attribute_mapping* may carry three keys:
      - 'modify': {mxnet_name: onnx_name} renames (a missing mxnet_name
        maps the onnx_name to None, preserving the original semantics)
      - 'add':    {name: value} entries inserted verbatim
      - 'remove': [name, ...] entries dropped if present (applied last)

    Returns a new dict; *attrs* itself is left untouched. The original
    aliased and mutated the caller's dict, which is shared module-level
    test-table data, so a second use of the same entry saw mangled attrs.
    """
    new_attrs = dict(attrs)  # work on a copy so shared test tables survive
    for mx_name, onnx_name in attribute_mapping.get('modify', {}).items():
        new_attrs[onnx_name] = new_attrs.pop(mx_name, None)
    for name, value in attribute_mapping.get('add', {}).items():
        new_attrs[name] = value
    for name in attribute_mapping.get('remove', []):
        new_attrs.pop(name, None)
    return new_attrs
def forward_pass(sym, arg, aux, data_names, input_data):
    """Run a single inference pass of *sym* and return its first output.

    :param sym: symbol to execute
    :param arg: arg params dict, or None to initialize randomly
    :param aux: aux params dict, or None to initialize randomly
    :param data_names: list of input names
    :param input_data: list of input arrays, parallel to *data_names*
    :return: numpy array holding the module's first output
    """
    data_shapes = []
    data_forward = []
    for name, value in zip(data_names, input_data):
        data_shapes.append((name, np.shape(value)))
        data_forward.append(mx.nd.array(value))
    # Build and bind an inference-only module on the CPU.
    mod = mx.mod.Module(symbol=sym, data_names=data_names,
                        context=mx.cpu(), label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
    if arg or aux:
        mod.set_params(arg_params=arg, aux_params=aux,
                       allow_missing=True, allow_extra=True)
    else:
        mod.init_params()
    # Wrap the inputs in the minimal batch structure mod.forward() expects.
    Batch = namedtuple('Batch', ['data'])
    mod.forward(Batch(data_forward), is_train=False)
    return mod.get_outputs()[0].asnumpy()
def get_input_tensors(input_data):
    """Build parallel name / ONNX value-info / MXNet symbol lists.

    Inputs are named "input1", "input2", ... in order; each gets an ONNX
    FLOAT tensor_value_info of the array's shape and a matching MXNet
    symbolic variable.
    """
    names, tensors, symbols = [], [], []
    for position, array in enumerate(input_data, start=1):
        name = "input%d" % position
        names.append(name)
        symbols.append(mx.sym.Variable(name))
        tensors.append(helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape=np.shape(array)))
    return names, tensors, symbols
def get_onnx_graph(testname, input_names, inputs, output_name, output_shape, attr):
    """Wrap a single ONNX operator node into a ready-to-run model.

    The node named *output_name* consumes *input_names* and writes one
    FLOAT output of *output_shape*; **attr is forwarded as node attributes.
    """
    out_info = helper.make_tensor_value_info(
        "output", TensorProto.FLOAT, shape=output_shape)
    op_node = helper.make_node(output_name, input_names, ["output"], **attr)
    return helper.make_model(
        helper.make_graph([op_node], testname, inputs, [out_info]))
class TestNode(unittest.TestCase):
    """ Tests for models.
    Tests are dynamically added.
    Therefore edit test_models to add more tests.
    """
    def test_import_export(self):
        # Round-trip every entry of the module-level `test_cases` table:
        # run the MXNet op, convert (or hand-build) the equivalent ONNX
        # model, execute it through `backend`, and compare results.
        for test in test_cases:
            # (name, mxnet op, ONNX op name, inputs, attrs, use MXNet's own
            #  exporter?, attr-name fixups, compare values?, compare shapes?)
            test_name, mxnet_op, onnx_name, inputs, attrs, mxnet_specific, fix_attrs, check_value, check_shape = test
            with self.subTest(test_name):
                names, input_tensors, inputsym = get_input_tensors(inputs)
                if inputs:
                    test_op = mxnet_op(*inputsym, **attrs)
                    mxnet_output = forward_pass(test_op, None, None, names, inputs)
                    outputshape = np.shape(mxnet_output)
                else:
                    # Input-free ops (e.g. random samplers): bind a dummy
                    # 'x' arg of the requested shape and execute directly.
                    test_op = mxnet_op(**attrs)
                    shape = attrs.get('shape', (1,))
                    x = mx.nd.zeros(shape, dtype='float32')
                    xgrad = mx.nd.zeros(shape, dtype='float32')
                    exe = test_op.bind(ctx=mx.cpu(), args={'x': x}, args_grad={'x': xgrad})
                    mxnet_output = exe.forward(is_train=False)[0].asnumpy()
                    outputshape = np.shape(mxnet_output)
                if mxnet_specific:
                    # Ops with MXNet-specific semantics go through the real
                    # exporter instead of a hand-assembled single-node graph.
                    onnxmodelfile = onnx_mxnet.export_model(test_op, {}, [np.shape(ip) for ip in inputs],
                                                            np.float32,
                                                            onnx_name + ".onnx")
                    onnxmodel = load_model(onnxmodelfile)
                else:
                    onnx_attrs = _fix_attributes(attrs, fix_attrs)
                    onnxmodel = get_onnx_graph(test_name, names, input_tensors, onnx_name, outputshape, onnx_attrs)
                bkd_rep = backend.prepare(onnxmodel, operation='export', backend='mxnet')
                output = bkd_rep.run(inputs)
                if check_value:
                    npt.assert_almost_equal(output[0], mxnet_output)
                if check_shape:
                    # Nondeterministic ops skip the value check; shape only.
                    npt.assert_equal(output[0].shape, outputshape)
        # Scalar-operand expressions: export the symbol, re-import it, and
        # check the re-imported graph reproduces the forward pass.
        input1 = get_rnd((1, 10, 2, 3))
        ipsym = mx.sym.Variable("input1")
        for test in test_scalar_ops:
            # NOTE(review): an entry matching none of these names silently
            # reuses `outsym` from the previous loop iteration.
            if test == 'Add':
                outsym = 2 + ipsym
            if test == "Sub":
                outsym = ipsym - 2
            if test == "rSub":
                outsym = ipsym.__rsub__(2)
            if test == "Mul":
                outsym = 2 * ipsym
            if test == "Div":
                outsym = ipsym / 2
            if test == "Pow":
                outsym = ipsym ** 2
            forward_op = forward_pass(outsym, None, None, ['input1'], input1)
            converted_model = onnx_mxnet.export_model(outsym, {}, [np.shape(input1)], np.float32,
                                                      onnx_file_path=outsym.name + ".onnx")
            sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
            result = forward_pass(sym, arg_params, aux_params, ['input1'], input1)
            npt.assert_almost_equal(result, forward_op)
    def test_imports(self):
        # Import-only checks: numpy computes the reference output for each
        # entry of `import_test_cases`, under both supported backends.
        for bk in ['mxnet', 'gluon']:
            for test in import_test_cases:
                test_name, onnx_name, inputs, np_op, attrs = test
                with self.subTest(test_name):
                    names, input_tensors, inputsym = get_input_tensors(inputs)
                    np_out = [np_op(*inputs, **attrs)]
                    output_shape = np.shape(np_out)
                    onnx_model = get_onnx_graph(test_name, names, input_tensors, onnx_name, output_shape, attrs)
                    bkd_rep = backend.prepare(onnx_model, operation='import', backend=bk)
                    mxnet_out = bkd_rep.run(inputs)
                    npt.assert_almost_equal(np_out, mxnet_out, decimal=4)
    def test_exports(self):
        # Export-only checks: only ONNX model validity is asserted, not
        # numerical output.
        input_shape = (2,1,3,1)
        for test in export_test_cases:
            test_name, onnx_name, mx_op, attrs = test
            input_sym = mx.sym.var('data')
            outsym = mx_op(input_sym, **attrs)
            converted_model = onnx_mxnet.export_model(outsym, {}, [input_shape], np.float32,
                                                      onnx_file_path=outsym.name + ".onnx")
            model = load_model(converted_model)
            checker.check_model(model)
# test_case = ("test_case_name", mxnet op, "ONNX_op_name", [input_list], attribute map, MXNet_specific=True/False,
# fix_attributes = {'modify': {mxnet_attr_name: onnx_attr_name},
# 'remove': [attr_name],
# 'add': {attr_name: value},
# check_value=True/False, check_shape=True/False)
test_cases = [
("test_equal", mx.sym.broadcast_equal, "Equal", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_greater", mx.sym.broadcast_greater, "Greater", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_less", mx.sym.broadcast_lesser, "Less", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_and", mx.sym.broadcast_logical_and, "And",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_xor", mx.sym.broadcast_logical_xor, "Xor",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_or", mx.sym.broadcast_logical_or, "Or",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_not", mx.sym.logical_not, "Not", [get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_square", mx.sym.square, "Pow", [get_rnd((2, 3), dtype=np.int32)], {}, True, {}, True, False),
("test_spacetodepth", mx.sym.space_to_depth, "SpaceToDepth", [get_rnd((1, 1, 4, 6))],
{'block_size': 2}, False, {}, True, False),
("test_softmax", mx.sym.SoftmaxOutput, "Softmax", [get_rnd((1000, 1000)), get_rnd(1000)],
{'ignore_label': 0, 'use_ignore': False}, True, {}, True, False),
("test_logistic_regression", mx.sym.LogisticRegressionOutput, "Sigmoid",
[get_rnd((1000, 1000)), get_rnd((1000, 1000))], {}, True, {}, True, False),
("test_fullyconnected", mx.sym.FullyConnected, "Gemm", [get_rnd((4, 3)), get_rnd((4, 3)), get_rnd(4)],
{'num_hidden': 4, 'name': 'FC'}, True, {}, True, False),
("test_lppool1", mx.sym.Pooling, "LpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 1, 'pool_type': 'lp'}, False,
{'modify': {'kernel': 'kernel_shape', 'pad': 'pads', 'stride': 'strides', 'p_value': 'p'},
'remove': ['pool_type']}, True, False),
("test_lppool2", mx.sym.Pooling, "LpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 2, 'pool_type': 'lp'}, False,
{'modify': {'kernel': 'kernel_shape', 'pad': 'pads', 'stride': 'strides', 'p_value': 'p'},
'remove': ['pool_type']}, True, False),
("test_globallppool1", mx.sym.Pooling, "GlobalLpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 1, 'pool_type': 'lp', 'global_pool': True}, False,
{'modify': {'p_value': 'p'},
'remove': ['pool_type', 'kernel', 'pad', 'stride', 'global_pool']}, True, False),
("test_globallppool2", mx.sym.Pooling, "GlobalLpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 2, 'pool_type': 'lp', 'global_pool': True}, False,
{'modify': {'p_value': 'p'},
'remove': ['pool_type', 'kernel', 'pad', 'stride', 'global_pool']}, True, False),
("test_roipool", mx.sym.ROIPooling, "MaxRoiPool",
[[[get_rnd(shape=(8, 6), low=1, high=100, dtype=np.int32)]], [[0, 0, 0, 4, 4]]],
{'pooled_size': (2, 2), 'spatial_scale': 0.7}, False,
{'modify': {'pooled_size': 'pooled_shape'}}, True, False),
# since results would be random, checking for shape alone
("test_multinomial", mx.sym.sample_multinomial, "Multinomial",
[np.array([0, 0.1, 0.2, 0.3, 0.4]).astype("float32")],
{'shape': (10,)}, False, {'modify': {'shape': 'sample_size'}}, False, True),
("test_random_normal", mx.sym.random_normal, "RandomNormal", [],
{'shape': (2, 2), 'loc': 0, 'scale': 1}, False, {'modify': {'loc': 'mean'}}, False, True),
("test_random_uniform", mx.sym.random_uniform, "RandomUniform", [],
{'shape': (2, 2), 'low': 0.5, 'high': 1.0}, False, {}, False, True)
]
# Scalar-operand ops exercised by TestNode.test_import_export.
# Fix: the original literal read `'rSub' 'Mul'` (missing comma), which
# implicit string concatenation collapsed into the single bogus entry
# 'rSubMul' -- so the rSub and Mul round trips silently re-tested 'Sub'.
test_scalar_ops = ['Add', 'Sub', 'rSub', 'Mul', 'Div', 'Pow']
# test_case = ("test_case_name", "ONNX_op_name", [input_list], np_op, attribute map)
# ONNX->MXNet import checks (TestNode.test_imports): numpy computes the
# reference output for each entry.
import_test_cases = [
    ("test_lpnormalization_default", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':2, 'axis':-1}),
    ("test_lpnormalization_ord1", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':1, 'axis':-1}),
    ("test_lpnormalization_ord2", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':2, 'axis':1})
]
# test_case = ("test_case_name", "ONNX_op_name", mxnet_op, attribute map)
# MXNet->ONNX export checks (TestNode.test_exports): only model validity
# is asserted, not numerical output.
export_test_cases = [
    ("test_expand", "Expand", mx.sym.broadcast_to, {'shape': (2,1,3,1)}),
    ("test_tile", "Tile", mx.sym.tile, {'reps': (2,3)})
]
if __name__ == '__main__':
    unittest.main()
| 46.932886 | 120 | 0.600887 |
from __future__ import absolute_import
import sys
import os
import unittest
import logging
import tarfile
from collections import namedtuple
import numpy as np
import numpy.testing as npt
from onnx import checker, numpy_helper, helper, load_model
from onnx import TensorProto
from mxnet.test_utils import download
from mxnet.contrib import onnx as onnx_mxnet
import mxnet as mx
import backend
CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(CURR_PATH, '../../python/unittest'))
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def get_rnd(shape, low=-1.0, high=1.0, dtype=np.float32):
    """Return a random ndarray of *shape*, always cast to float32.

    *dtype* selects the sampling scheme, not the returned dtype:
      - np.float32: uniform floats in [low, high)
      - np.int32:   integers in [low, high) cast to float32
      - np.bool_:   values drawn from {False, True} (low/high ignored)

    Raises ValueError for any other dtype; the original silently fell
    through and returned None, which surfaced later as a confusing error.
    """
    if dtype == np.float32:
        return (np.random.uniform(low, high,
                                  np.prod(shape)).reshape(shape).astype(np.float32))
    if dtype == np.int32:
        return (np.random.randint(low, high,
                                  np.prod(shape)).reshape(shape).astype(np.float32))
    if dtype == np.bool_:
        return np.random.choice(a=[False, True], size=shape).astype(np.float32)
    raise ValueError("get_rnd: unsupported dtype %r" % (dtype,))
def _fix_attributes(attrs, attribute_mapping):
    """Translate MXNet attribute names/values to their ONNX equivalents.

    *attribute_mapping* may carry three keys:
      - 'modify': {mxnet_name: onnx_name} renames (a missing mxnet_name
        maps the onnx_name to None, preserving the original semantics)
      - 'add':    {name: value} entries inserted verbatim
      - 'remove': [name, ...] entries dropped if present (applied last)

    Returns a new dict; *attrs* itself is left untouched. The original
    aliased and mutated the caller's dict, which is shared module-level
    test-table data, so a second use of the same entry saw mangled attrs.
    """
    new_attrs = dict(attrs)  # work on a copy so shared test tables survive
    for mx_name, onnx_name in attribute_mapping.get('modify', {}).items():
        new_attrs[onnx_name] = new_attrs.pop(mx_name, None)
    for name, value in attribute_mapping.get('add', {}).items():
        new_attrs[name] = value
    for name in attribute_mapping.get('remove', []):
        new_attrs.pop(name, None)
    return new_attrs
def forward_pass(sym, arg, aux, data_names, input_data):
    """Run a single inference pass of *sym* and return its first output.

    :param sym: symbol to execute
    :param arg: arg params dict, or None to initialize randomly
    :param aux: aux params dict, or None to initialize randomly
    :param data_names: list of input names
    :param input_data: list of input arrays, parallel to *data_names*
    :return: numpy array holding the module's first output
    """
    data_shapes = []
    data_forward = []
    for name, value in zip(data_names, input_data):
        data_shapes.append((name, np.shape(value)))
        data_forward.append(mx.nd.array(value))
    # Build and bind an inference-only module on the CPU.
    mod = mx.mod.Module(symbol=sym, data_names=data_names,
                        context=mx.cpu(), label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
    if arg or aux:
        mod.set_params(arg_params=arg, aux_params=aux,
                       allow_missing=True, allow_extra=True)
    else:
        mod.init_params()
    # Wrap the inputs in the minimal batch structure mod.forward() expects.
    Batch = namedtuple('Batch', ['data'])
    mod.forward(Batch(data_forward), is_train=False)
    return mod.get_outputs()[0].asnumpy()
def get_input_tensors(input_data):
    """Build parallel name / ONNX value-info / MXNet symbol lists.

    Inputs are named "input1", "input2", ... in order; each gets an ONNX
    FLOAT tensor_value_info of the array's shape and a matching MXNet
    symbolic variable.
    """
    names, tensors, symbols = [], [], []
    for position, array in enumerate(input_data, start=1):
        name = "input%d" % position
        names.append(name)
        symbols.append(mx.sym.Variable(name))
        tensors.append(helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape=np.shape(array)))
    return names, tensors, symbols
def get_onnx_graph(testname, input_names, inputs, output_name, output_shape, attr):
    """Wrap a single ONNX operator node into a ready-to-run model.

    The node named *output_name* consumes *input_names* and writes one
    FLOAT output of *output_shape*; **attr is forwarded as node attributes.
    """
    out_info = helper.make_tensor_value_info(
        "output", TensorProto.FLOAT, shape=output_shape)
    op_node = helper.make_node(output_name, input_names, ["output"], **attr)
    return helper.make_model(
        helper.make_graph([op_node], testname, inputs, [out_info]))
class TestNode(unittest.TestCase):
    """Operator round-trip tests driven by the module-level tables
    (test_cases, test_scalar_ops, import_test_cases, export_test_cases)."""
    def test_import_export(self):
        # Round-trip every table entry: run the MXNet op, build/export the
        # equivalent ONNX model, execute it via `backend`, compare results.
        for test in test_cases:
            test_name, mxnet_op, onnx_name, inputs, attrs, mxnet_specific, fix_attrs, check_value, check_shape = test
            with self.subTest(test_name):
                names, input_tensors, inputsym = get_input_tensors(inputs)
                if inputs:
                    test_op = mxnet_op(*inputsym, **attrs)
                    mxnet_output = forward_pass(test_op, None, None, names, inputs)
                    outputshape = np.shape(mxnet_output)
                else:
                    # Input-free ops (e.g. random samplers): bind a dummy
                    # 'x' arg of the requested shape and execute directly.
                    test_op = mxnet_op(**attrs)
                    shape = attrs.get('shape', (1,))
                    x = mx.nd.zeros(shape, dtype='float32')
                    xgrad = mx.nd.zeros(shape, dtype='float32')
                    exe = test_op.bind(ctx=mx.cpu(), args={'x': x}, args_grad={'x': xgrad})
                    mxnet_output = exe.forward(is_train=False)[0].asnumpy()
                    outputshape = np.shape(mxnet_output)
                if mxnet_specific:
                    # MXNet-specific ops go through the real exporter rather
                    # than a hand-assembled single-node graph.
                    onnxmodelfile = onnx_mxnet.export_model(test_op, {}, [np.shape(ip) for ip in inputs],
                                                            np.float32,
                                                            onnx_name + ".onnx")
                    onnxmodel = load_model(onnxmodelfile)
                else:
                    onnx_attrs = _fix_attributes(attrs, fix_attrs)
                    onnxmodel = get_onnx_graph(test_name, names, input_tensors, onnx_name, outputshape, onnx_attrs)
                bkd_rep = backend.prepare(onnxmodel, operation='export', backend='mxnet')
                output = bkd_rep.run(inputs)
                if check_value:
                    npt.assert_almost_equal(output[0], mxnet_output)
                if check_shape:
                    # Nondeterministic ops skip the value check; shape only.
                    npt.assert_equal(output[0].shape, outputshape)
        # Scalar-operand expressions: export, re-import, re-run, compare.
        input1 = get_rnd((1, 10, 2, 3))
        ipsym = mx.sym.Variable("input1")
        for test in test_scalar_ops:
            # NOTE(review): an entry matching none of these names silently
            # reuses `outsym` from the previous loop iteration.
            if test == 'Add':
                outsym = 2 + ipsym
            if test == "Sub":
                outsym = ipsym - 2
            if test == "rSub":
                outsym = ipsym.__rsub__(2)
            if test == "Mul":
                outsym = 2 * ipsym
            if test == "Div":
                outsym = ipsym / 2
            if test == "Pow":
                outsym = ipsym ** 2
            forward_op = forward_pass(outsym, None, None, ['input1'], input1)
            converted_model = onnx_mxnet.export_model(outsym, {}, [np.shape(input1)], np.float32,
                                                      onnx_file_path=outsym.name + ".onnx")
            sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
            result = forward_pass(sym, arg_params, aux_params, ['input1'], input1)
            npt.assert_almost_equal(result, forward_op)
    def test_imports(self):
        # Import-only checks: numpy computes the reference output for each
        # entry of `import_test_cases`, under both supported backends.
        for bk in ['mxnet', 'gluon']:
            for test in import_test_cases:
                test_name, onnx_name, inputs, np_op, attrs = test
                with self.subTest(test_name):
                    names, input_tensors, inputsym = get_input_tensors(inputs)
                    np_out = [np_op(*inputs, **attrs)]
                    output_shape = np.shape(np_out)
                    onnx_model = get_onnx_graph(test_name, names, input_tensors, onnx_name, output_shape, attrs)
                    bkd_rep = backend.prepare(onnx_model, operation='import', backend=bk)
                    mxnet_out = bkd_rep.run(inputs)
                    npt.assert_almost_equal(np_out, mxnet_out, decimal=4)
    def test_exports(self):
        # Export-only checks: only ONNX model validity is asserted.
        input_shape = (2,1,3,1)
        for test in export_test_cases:
            test_name, onnx_name, mx_op, attrs = test
            input_sym = mx.sym.var('data')
            outsym = mx_op(input_sym, **attrs)
            converted_model = onnx_mxnet.export_model(outsym, {}, [input_shape], np.float32,
                                                      onnx_file_path=outsym.name + ".onnx")
            model = load_model(converted_model)
            checker.check_model(model)
test_cases = [
("test_equal", mx.sym.broadcast_equal, "Equal", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_greater", mx.sym.broadcast_greater, "Greater", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_less", mx.sym.broadcast_lesser, "Less", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False, {}, True,
False),
("test_and", mx.sym.broadcast_logical_and, "And",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_xor", mx.sym.broadcast_logical_xor, "Xor",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_or", mx.sym.broadcast_logical_or, "Or",
[get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_not", mx.sym.logical_not, "Not", [get_rnd((3, 4, 5), dtype=np.bool_)], {}, False, {}, True, False),
("test_square", mx.sym.square, "Pow", [get_rnd((2, 3), dtype=np.int32)], {}, True, {}, True, False),
("test_spacetodepth", mx.sym.space_to_depth, "SpaceToDepth", [get_rnd((1, 1, 4, 6))],
{'block_size': 2}, False, {}, True, False),
("test_softmax", mx.sym.SoftmaxOutput, "Softmax", [get_rnd((1000, 1000)), get_rnd(1000)],
{'ignore_label': 0, 'use_ignore': False}, True, {}, True, False),
("test_logistic_regression", mx.sym.LogisticRegressionOutput, "Sigmoid",
[get_rnd((1000, 1000)), get_rnd((1000, 1000))], {}, True, {}, True, False),
("test_fullyconnected", mx.sym.FullyConnected, "Gemm", [get_rnd((4, 3)), get_rnd((4, 3)), get_rnd(4)],
{'num_hidden': 4, 'name': 'FC'}, True, {}, True, False),
("test_lppool1", mx.sym.Pooling, "LpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 1, 'pool_type': 'lp'}, False,
{'modify': {'kernel': 'kernel_shape', 'pad': 'pads', 'stride': 'strides', 'p_value': 'p'},
'remove': ['pool_type']}, True, False),
("test_lppool2", mx.sym.Pooling, "LpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 2, 'pool_type': 'lp'}, False,
{'modify': {'kernel': 'kernel_shape', 'pad': 'pads', 'stride': 'strides', 'p_value': 'p'},
'remove': ['pool_type']}, True, False),
("test_globallppool1", mx.sym.Pooling, "GlobalLpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 1, 'pool_type': 'lp', 'global_pool': True}, False,
{'modify': {'p_value': 'p'},
'remove': ['pool_type', 'kernel', 'pad', 'stride', 'global_pool']}, True, False),
("test_globallppool2", mx.sym.Pooling, "GlobalLpPool", [get_rnd((2, 3, 20, 20))],
{'kernel': (4, 5), 'pad': (0, 0), 'stride': (1, 1), 'p_value': 2, 'pool_type': 'lp', 'global_pool': True}, False,
{'modify': {'p_value': 'p'},
'remove': ['pool_type', 'kernel', 'pad', 'stride', 'global_pool']}, True, False),
("test_roipool", mx.sym.ROIPooling, "MaxRoiPool",
[[[get_rnd(shape=(8, 6), low=1, high=100, dtype=np.int32)]], [[0, 0, 0, 4, 4]]],
{'pooled_size': (2, 2), 'spatial_scale': 0.7}, False,
{'modify': {'pooled_size': 'pooled_shape'}}, True, False),
("test_multinomial", mx.sym.sample_multinomial, "Multinomial",
[np.array([0, 0.1, 0.2, 0.3, 0.4]).astype("float32")],
{'shape': (10,)}, False, {'modify': {'shape': 'sample_size'}}, False, True),
("test_random_normal", mx.sym.random_normal, "RandomNormal", [],
{'shape': (2, 2), 'loc': 0, 'scale': 1}, False, {'modify': {'loc': 'mean'}}, False, True),
("test_random_uniform", mx.sym.random_uniform, "RandomUniform", [],
{'shape': (2, 2), 'low': 0.5, 'high': 1.0}, False, {}, False, True)
]
# Scalar-operand ops exercised by TestNode.test_import_export.
# Fix: the original literal read `'rSub' 'Mul'` (missing comma), which
# implicit string concatenation collapsed into the single bogus entry
# 'rSubMul' -- so the rSub and Mul round trips silently re-tested 'Sub'.
test_scalar_ops = ['Add', 'Sub', 'rSub', 'Mul', 'Div', 'Pow']
import_test_cases = [
("test_lpnormalization_default", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':2, 'axis':-1}),
("test_lpnormalization_ord1", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':1, 'axis':-1}),
("test_lpnormalization_ord2", "LpNormalization", [get_rnd([5, 3, 3, 2])], np.linalg.norm, {'ord':2, 'axis':1})
]
export_test_cases = [
("test_expand", "Expand", mx.sym.broadcast_to, {'shape': (2,1,3,1)}),
("test_tile", "Tile", mx.sym.tile, {'reps': (2,3)})
]
if __name__ == '__main__':
unittest.main()
| true | true |
f7fc5ecd94ea31dbd97c3848c46a2891c0fb0bee | 5,034 | py | Python | IPython/core/tests/test_oinspect.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 1 | 2015-01-09T21:10:58.000Z | 2015-01-09T21:10:58.000Z | IPython/core/tests/test_oinspect.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/core/tests/test_oinspect.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 1 | 2015-05-17T14:14:26.000Z | 2015-05-17T14:14:26.000Z | """Tests for the object inspection functionality.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
# Third-party imports
import nose.tools as nt
# Our own imports
from .. import oinspect
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
inspector = oinspect.Inspector()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# A few generic objects we can then inspect in the tests below
# CAUTION: these docstrings double as expected values -- the tests below
# assert on __doc__ directly -- so the text must not be edited.
class Call(object):
    """This is the class docstring."""
    def __init__(self, x, y=1):
        """This is the constructor docstring."""
    def __call__(self, *a, **kw):
        """This is the call docstring."""
    def method(self, x, z=2):
        """Some method's docstring"""
class OldStyle:
    """An old-style class for testing."""
    pass
def f(x, y=2, *a, **kw):
    """A simple function."""
def g(y, z=3, *a, **kw):
    pass  # no docstring
def check_calltip(obj, name, call, docstring):
    """Generic check pattern all calltip tests will use"""
    # Resolve the object's info dict, derive the call tip from it, and
    # compare both the rendered call line and the docstring.
    info = inspector.info(obj, name)
    call_line, ds = oinspect.call_tip(info)
    nt.assert_equal(call_line, call)
    nt.assert_equal(ds, docstring)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_calltip_class():
    # Calling a class tips the constructor's signature (self elided).
    check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
    # Calling an instance tips __call__'s signature.
    c = Call(1)
    check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
    c = Call(1)
    check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
    check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
    # Functions without a docstring render the '<no docstring>' placeholder.
    check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
def test_calltip_builtin():
    # Builtins expose no introspectable signature: no call line is expected.
    check_calltip(sum, 'sum', None, sum.__doc__)
def test_info():
    "Check that Inspector.info fills out various fields as expected."
    i = inspector.info(Call, oname='Call')
    nt.assert_equal(i['type_name'], 'type')
    expted_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'>
    nt.assert_equal(i['base_class'], expted_class)
    nt.assert_equal(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'>")
    fname = __file__
    if fname.endswith(".pyc"):
        # Point back at the .py source; info() reports the source file.
        fname = fname[:-1]
    # case-insensitive comparison needed on some filesystems
    # e.g. Windows:
    nt.assert_equal(i['file'].lower(), fname.lower())
    nt.assert_equal(i['definition'], 'Call(self, *a, **kw)\n')
    nt.assert_equal(i['docstring'], Call.__doc__)
    # At the default detail level source is omitted and docstring kept...
    nt.assert_equal(i['source'], None)
    nt.assert_true(i['isclass'])
    nt.assert_equal(i['init_definition'], "Call(self, x, y=1)\n")
    nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
    # ...and with detail_level=1 it is the other way around.
    i = inspector.info(Call, detail_level=1)
    nt.assert_not_equal(i['source'], None)
    nt.assert_equal(i['docstring'], None)
    # An instance-level __doc__ overrides the class docstring in 'docstring',
    # while the class/init/call docstrings remain available separately.
    c = Call(1)
    c.__doc__ = "Modified instance docstring"
    i = inspector.info(c)
    nt.assert_equal(i['type_name'], 'Call')
    nt.assert_equal(i['docstring'], "Modified instance docstring")
    nt.assert_equal(i['class_docstring'], Call.__doc__)
    nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
    nt.assert_equal(i['call_docstring'], c.__call__.__doc__)
    # Test old-style classes, which for example may not have an __init__ method.
    if not py3compat.PY3:
        i = inspector.info(OldStyle)
        nt.assert_equal(i['type_name'], 'classobj')
        i = inspector.info(OldStyle())
        nt.assert_equal(i['type_name'], 'instance')
        nt.assert_equal(i['docstring'], OldStyle.__doc__)
def test_getdoc():
    """getdoc() honors an object's getdoc() hook over __doc__, falling
    back to the plain docstring when the hook is absent or returns None."""
    class A(object):
        """standard docstring"""
        pass
    class B(object):
        """standard docstring"""
        def getdoc(self):
            return "custom docstring"
    class C(object):
        """standard docstring"""
        def getdoc(self):
            return None
    # No hook -> docstring; hook -> its return value; hook returning
    # None -> fall back to the docstring.
    nt.assert_equal(oinspect.getdoc(A()), "standard docstring")
    nt.assert_equal(oinspect.getdoc(B()), "custom docstring")
    nt.assert_equal(oinspect.getdoc(C()), "standard docstring")
| 31.4625 | 88 | 0.547874 |
from __future__ import print_function
import nose.tools as nt
from .. import oinspect
from IPython.utils import py3compat
inspector = oinspect.Inspector()
class Call(object):
def __init__(self, x, y=1):
def __call__(self, *a, **kw):
def method(self, x, z=2):
class OldStyle:
pass
def f(x, y=2, *a, **kw):
def g(y, z=3, *a, **kw):
pass
def check_calltip(obj, name, call, docstring):
info = inspector.info(obj, name)
call_line, ds = oinspect.call_tip(info)
nt.assert_equal(call_line, call)
nt.assert_equal(ds, docstring)
def test_calltip_class():
check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
c = Call(1)
check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
c = Call(1)
check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
def test_calltip_builtin():
check_calltip(sum, 'sum', None, sum.__doc__)
def test_info():
i = inspector.info(Call, oname='Call')
nt.assert_equal(i['type_name'], 'type')
expted_class = str(type(type))
nt.assert_equal(i['base_class'], expted_class)
nt.assert_equal(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'>")
fname = __file__
if fname.endswith(".pyc"):
fname = fname[:-1]
nt.assert_equal(i['file'].lower(), fname.lower())
nt.assert_equal(i['definition'], 'Call(self, *a, **kw)\n')
nt.assert_equal(i['docstring'], Call.__doc__)
nt.assert_equal(i['source'], None)
nt.assert_true(i['isclass'])
nt.assert_equal(i['init_definition'], "Call(self, x, y=1)\n")
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
i = inspector.info(Call, detail_level=1)
nt.assert_not_equal(i['source'], None)
nt.assert_equal(i['docstring'], None)
c = Call(1)
c.__doc__ = "Modified instance docstring"
i = inspector.info(c)
nt.assert_equal(i['type_name'], 'Call')
nt.assert_equal(i['docstring'], "Modified instance docstring")
nt.assert_equal(i['class_docstring'], Call.__doc__)
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
nt.assert_equal(i['call_docstring'], c.__call__.__doc__)
if not py3compat.PY3:
i = inspector.info(OldStyle)
nt.assert_equal(i['type_name'], 'classobj')
i = inspector.info(OldStyle())
nt.assert_equal(i['type_name'], 'instance')
nt.assert_equal(i['docstring'], OldStyle.__doc__)
def test_getdoc():
class A(object):
pass
class B(object):
def getdoc(self):
return "custom docstring"
class C(object):
def getdoc(self):
return None
a = A()
b = B()
c = C()
nt.assert_equal(oinspect.getdoc(a), "standard docstring")
nt.assert_equal(oinspect.getdoc(b), "custom docstring")
nt.assert_equal(oinspect.getdoc(c), "standard docstring")
| true | true |
f7fc5f0ef70cd12d59ed2a98fd5202d893e4080d | 14,949 | py | Python | lib/roi_data_layer/roibatchLoader.py | Yoo-Youngjae/One-Shot-Object-Detection | c560a3dfb042776854bb928682dbbf545e2cd1bf | [
"MIT"
] | null | null | null | lib/roi_data_layer/roibatchLoader.py | Yoo-Youngjae/One-Shot-Object-Detection | c560a3dfb042776854bb928682dbbf545e2cd1bf | [
"MIT"
] | null | null | null | lib/roi_data_layer/roibatchLoader.py | Yoo-Youngjae/One-Shot-Object-Detection | c560a3dfb042776854bb928682dbbf545e2cd1bf | [
"MIT"
] | null | null | null |
"""The data layer used during training to train a Fast R-CNN network.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import torch
from collections import Counter
from scipy.misc import imread
from model.utils.config import cfg
from roi_data_layer.minibatch import get_minibatch, get_minibatch
from model.utils.blob import prep_im_for_blob, im_list_to_blob, crop
from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
import numpy as np
import cv2
import random
import time
import pdb
class roibatchLoader(data.Dataset):
    """Dataset that pairs each roidb image with a query (exemplar) crop.

    Each item is (image, query, im_info, gt_boxes, num_boxes) during
    training, or (image, query, im_info, gt_boxes, category) at test
    time.  Images are cropped/padded so that all samples in one
    mini-batch share the same aspect ratio and can be stacked.
    """

    def __init__(self, roidb, ratio_list, ratio_index, query, batch_size, num_classes, training=True, normalize=None, seen=True):
        # roidb: Fast R-CNN style image database; query: per-class exemplar DB.
        self._roidb = roidb
        self._query = query
        self._num_classes = num_classes
        # we make the height of image consistent to trim_height, trim_width
        self.trim_height = cfg.TRAIN.TRIM_HEIGHT
        self.trim_width = cfg.TRAIN.TRIM_WIDTH
        self.max_num_box = cfg.MAX_NUM_GT_BOXES
        self.training = training
        self.normalize = normalize
        self.ratio_list = ratio_list
        self.query_position = 0
        if training:
            self.ratio_index = ratio_index
        else:
            # at test time ratio_index is a pair: (sample indices, category per index)
            self.cat_list = ratio_index[1]
            self.ratio_index = ratio_index[0]

        self.batch_size = batch_size
        self.data_size = len(self.ratio_list)

        # given the ratio_list, we want to make the ratio same for each batch.
        self.ratio_list_batch = torch.Tensor(self.data_size).zero_()
        num_batch = int(np.ceil(len(ratio_index) / batch_size))
        if self.training:
            for i in range(num_batch):
                left_idx = i*batch_size
                right_idx = min((i+1)*batch_size-1, self.data_size-1)

                if ratio_list[right_idx] < 1:
                    # for ratio < 1, we preserve the leftmost in each batch.
                    target_ratio = ratio_list[left_idx]
                elif ratio_list[left_idx] > 1:
                    # for ratio > 1, we preserve the rightmost in each batch.
                    target_ratio = ratio_list[right_idx]
                else:
                    # for ratio cross 1, we make it to be 1.
                    target_ratio = 1

                self.ratio_list_batch[left_idx:(right_idx+1)] = target_ratio

        # Earlier COCO category-id sets, kept for reference:
        # self._cat_ids = [
        #     1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
        #     14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
        #     24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
        #     37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
        #     48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
        #     58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
        #     72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
        #     82, 84, 85, 86, 87, 88, 89, 90
        # ]
        # self._cat_ids = [27, 28, 31, 32, 33, 37, 39, 40, 44, 46, 47, 48, 49, 50, 51, 52, 53, 55, 57, 72, 73, 74, 75, 76, 77,
        #                  78, 80, 84, 85, 86, 87, 88, 89, 90]
        self._cat_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                         27, 28, 29, 30, 31, 32, 33, 34]
        # num of cat == 34
        # todo: Check changed code is acceptable
        # todo 26 == backpack, blank is not skipped. so we have to rearrange the whole numbering!!
        # 1-based contiguous label -> dataset category id, and its inverse.
        self._classes = {
            ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
        }
        self._classes_inv = {
            value: key for key, value in self._classes.items()
        }
        self.filter(seen)
        self.probability()

    def __getitem__(self, index):
        """Build one padded/cropped sample together with its query crop."""
        index_ratio = int(self.ratio_index[index])
        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db , self._num_classes)
        # keep only ground-truth boxes whose class is in the seen/unseen split
        blobs['gt_boxes'] = [x for x in blobs['gt_boxes'] if x[-1] in self.list_ind]
        blobs['gt_boxes'] = np.array(blobs['gt_boxes'])

        if self.training:
            # Random choice query catgory, weighted by inverse class frequency
            try:
                catgory = blobs['gt_boxes'][:,-1]
            except:
                print('minibatch_db', minibatch_db, "blobs['gt_boxes']", blobs['gt_boxes'])
                exit()
            cand = np.unique(catgory)
            if len(cand)==1:
                choice = cand[0]
            else:
                p = []
                for i in cand:
                    p.append(self.show_time[i])
                p = np.array(p)
                p /= p.sum()
                choice = np.random.choice(cand,1,p=p)[0]

            # Delete useless gt_boxes: the chosen class becomes label 1,
            # everything else becomes background (0).
            blobs['gt_boxes'][:,-1] = np.where(blobs['gt_boxes'][:,-1]==choice,1,0)

            # Get query image
            query = self.load_query(choice)
        else:
            query = self.load_query(index, minibatch_db[0]['img_id'])

        data = torch.from_numpy(blobs['data'])
        query = torch.from_numpy(query)
        query = query.permute(0, 3, 1, 2).contiguous().squeeze(0)
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            ########################################################
            # padding the input image to fixed size for each group #
            ########################################################

            # NOTE1: need to cope with the case where a group cover both conditions. (done)
            # NOTE2: need to consider the situation for the tail samples. (no worry)
            # NOTE3: need to implement a parallel data loader. (no worry)
            # get the index range

            # if the image need to crop, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:,1]))
                    max_y = int(torch.max(gt_boxes[:,3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            # the crop window can cover all boxes: pick a random
                            # offset that keeps every box inside the window
                            y_s_min = max(max_y-trim_size, 0)
                            y_s_max = min(min_y, data_height-trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            # boxes taller than the window: center-ish crop
                            y_s_add = int((box_region-trim_size)/2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(range(min_y, min_y+y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordiante of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according the trip
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:,0]))
                    max_x = int(torch.max(gt_boxes[:,2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            x_s_min = max(max_x-trim_size, 0)
                            x_s_max = min(min_x, data_width-trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region-trim_size)/2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(range(min_x, min_x+x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordiante of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    # update gt bounding box according the trip
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                trim_size = int(np.floor(data_width / ratio))

                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                 data_width, 3).zero_()

                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
                # print("height %d %d \n" %(index, anchor_idx))
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image need to crop.
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]

                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                # gt_boxes.clamp_(0, trim_size)
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box: drop degenerate (zero-area) boxes
            not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
            # not_keep = (gt_boxes[:,2] - gt_boxes[:,0]) < 10
            # print(not_keep)
            # not_keep = (gt_boxes[:,2] - gt_boxes[:,0]) < torch.FloatTensor([10]) | (gt_boxes[:,3] - gt_boxes[:,1]) < torch.FloatTensor([10])
            keep = torch.nonzero(not_keep == 0).view(-1)

            # fixed-size gt tensor so samples can be batched
            gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
            if keep.numel() != 0 :
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

                # permute trim_data to adapt to downstream processing
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, query, im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
            im_info = im_info.view(3)

            # gt_boxes = torch.FloatTensor([1,1,1,1,1])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])
            choice = self.cat_list[index]

            return data, query, im_info, gt_boxes, choice

    def load_query(self, choice, id=0):
        """Return a preprocessed exemplar crop.

        Training: ``choice`` is a class index; a random non-degenerate
        exemplar of that class is picked.  Testing: ``choice`` is the sample
        index and ``id`` (image id) seeds the RNG so the picked exemplar is
        deterministic per test image.
        """
        if self.training:
            # Random choice query catgory image
            all_data = self._query[choice]
            # data = random.choice(all_data)
            # todo: check changed code is acceptable.
            # resample until the exemplar box has non-zero width and height
            while True:
                data = random.choice(all_data)
                if int(data['boxes'][1]) == int(data['boxes'][3]) or int(data['boxes'][0]) == int(data['boxes'][2]):
                    continue
                else:
                    break
        else:
            # Take out the purpose category for testing
            catgory = self.cat_list[choice]
            # list all the candidate image
            all_data = self._query[catgory]

            # Use image_id to determine the random seed
            # The list l is candidate sequence, which random by image_id
            random.seed(id)
            l = list(range(len(all_data)))
            random.shuffle(l)

            # choose the candidate sequence and take out the data information
            position=l[self.query_position%len(l)]
            data = all_data[position]

        # Get image
        path = data['image_path']
        im = imread(path)
        # todo: check changed code is acceptable.
        # check_zero = True
        # while check_zero:
        #     path = data['image_path']
        #     im = imread(path)
        #     if 0 not in im.shape[0:3]:
        #         check_zero = False
        #         break
        #     elif 0 in im.shape[0:3]:
        #         data = random.choice(all_data)

        if len(im.shape) == 2:
            # grayscale -> replicate to 3 channels
            im = im[:,:,np.newaxis]
            im = np.concatenate((im,im,im), axis=2)

        im = crop(im, data['boxes'], cfg.TRAIN.query_size)

        # flip the channel, since the original one using cv2
        # rgb -> bgr
        # im = im[:,:,::-1]

        # random horizontal flip augmentation (training only)
        if random.randint(0,99)/100 > 0.5 and self.training:
            im = im[:, ::-1, :]

        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TRAIN.query_size,
                                        cfg.TRAIN.MAX_SIZE)

        query = im_list_to_blob([im])

        return query

    def __len__(self):
        """Number of samples (one per ratio_index entry)."""
        return len(self.ratio_index)

    def filter(self, seen):
        """Select the class subset: seen(1), unseen(2) or all(3).

        When a single group number is given, classes are split by
        ``cat % 4`` against that group.
        """
        if seen==1:
            self.list = cfg.train_categories
            # Group number to class
            if len(self.list)==1:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1) if cat%4 != self.list[0]]
                # len(self._classes)
        elif seen==2:
            self.list = cfg.test_categories
            # Group number to class
            if len(self.list)==1:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1) if cat%4 == self.list[0]]
        elif seen==3:
            self.list = cfg.train_categories + cfg.test_categories
            # Group number to class
            if len(self.list)==2:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1)]
        self.list_ind = [self._classes_inv[x] for x in self.list]

    def probability(self):
        """Compute normalized inverse-frequency sampling weights per class."""
        show_time = {}
        for i in self.list_ind:
            show_time[i] = 0

        # count ground-truth instances per class across the roidb
        for roi in self._roidb:
            result = Counter(roi['gt_classes'])
            for t in result:
                if t in self.list_ind:
                    show_time[t] += result[t]

        # rarer classes get larger weights
        for i in self.list_ind:
            show_time[i] = 1/show_time[i]

        sum_prob = sum(show_time.values())

        for i in self.list_ind:
            show_time[i] = show_time[i]/sum_prob

        self.show_time = show_time
| 37.466165 | 138 | 0.545455 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import torch
from collections import Counter
from scipy.misc import imread
from model.utils.config import cfg
from roi_data_layer.minibatch import get_minibatch, get_minibatch
from model.utils.blob import prep_im_for_blob, im_list_to_blob, crop
from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
import numpy as np
import cv2
import random
import time
import pdb
class roibatchLoader(data.Dataset):
    """Dataset that pairs each roidb image with a query (exemplar) crop.

    Each item is (image, query, im_info, gt_boxes, num_boxes) during
    training, or (image, query, im_info, gt_boxes, category) at test time.

    BUGFIX: in ``__getitem__`` the training branch used ``ratio`` without
    ever assigning it (NameError on the first sample) — the line
    ``ratio = self.ratio_list_batch[index]`` and the whole ``need_crop``
    cropping section had been dropped.  Both are restored below.
    """

    def __init__(self, roidb, ratio_list, ratio_index, query, batch_size, num_classes, training=True, normalize=None, seen=True):
        # roidb: Fast R-CNN style image database; query: per-class exemplar DB.
        self._roidb = roidb
        self._query = query
        self._num_classes = num_classes
        self.trim_height = cfg.TRAIN.TRIM_HEIGHT
        self.trim_width = cfg.TRAIN.TRIM_WIDTH
        self.max_num_box = cfg.MAX_NUM_GT_BOXES
        self.training = training
        self.normalize = normalize
        self.ratio_list = ratio_list
        self.query_position = 0
        if training:
            self.ratio_index = ratio_index
        else:
            # at test time ratio_index is (sample indices, category per index)
            self.cat_list = ratio_index[1]
            self.ratio_index = ratio_index[0]
        self.batch_size = batch_size
        self.data_size = len(self.ratio_list)
        # one shared target aspect ratio per training batch
        self.ratio_list_batch = torch.Tensor(self.data_size).zero_()
        num_batch = int(np.ceil(len(ratio_index) / batch_size))
        if self.training:
            for i in range(num_batch):
                left_idx = i*batch_size
                right_idx = min((i+1)*batch_size-1, self.data_size-1)
                if ratio_list[right_idx] < 1:
                    # all ratios < 1: keep the smallest (leftmost) ratio
                    target_ratio = ratio_list[left_idx]
                elif ratio_list[left_idx] > 1:
                    # all ratios > 1: keep the largest (rightmost) ratio
                    target_ratio = ratio_list[right_idx]
                else:
                    # ratios straddle 1: use a square target
                    target_ratio = 1
                self.ratio_list_batch[left_idx:(right_idx+1)] = target_ratio
        self._cat_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                         27, 28, 29, 30, 31, 32, 33, 34]
        # 1-based contiguous label -> dataset category id, and its inverse
        self._classes = {
            ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
        }
        self._classes_inv = {
            value: key for key, value in self._classes.items()
        }
        self.filter(seen)
        self.probability()

    def __getitem__(self, index):
        """Build one padded/cropped sample together with its query crop."""
        index_ratio = int(self.ratio_index[index])
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db , self._num_classes)
        # keep only ground-truth boxes whose class is in the seen/unseen split
        blobs['gt_boxes'] = [x for x in blobs['gt_boxes'] if x[-1] in self.list_ind]
        blobs['gt_boxes'] = np.array(blobs['gt_boxes'])
        if self.training:
            try:
                catgory = blobs['gt_boxes'][:,-1]
            except:
                print('minibatch_db', minibatch_db, "blobs['gt_boxes']", blobs['gt_boxes'])
                exit()
            # pick the query class, weighted by inverse class frequency
            cand = np.unique(catgory)
            if len(cand)==1:
                choice = cand[0]
            else:
                p = []
                for i in cand:
                    p.append(self.show_time[i])
                p = np.array(p)
                p /= p.sum()
                choice = np.random.choice(cand,1,p=p)[0]
            # chosen class becomes label 1, everything else background (0)
            blobs['gt_boxes'][:,-1] = np.where(blobs['gt_boxes'][:,-1]==choice,1,0)
            query = self.load_query(choice)
        else:
            query = self.load_query(index, minibatch_db[0]['img_id'])
        data = torch.from_numpy(blobs['data'])
        query = torch.from_numpy(query)
        query = query.permute(0, 3, 1, 2).contiguous().squeeze(0)
        im_info = torch.from_numpy(blobs['im_info'])
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            # RESTORED: target aspect ratio for this sample's batch
            # (previously missing -> NameError below).
            ratio = self.ratio_list_batch[index]

            # RESTORED: crop oversized images toward the target ratio,
            # keeping as many ground-truth boxes inside as possible.
            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # data_width << data_height: crop the height
                    min_y = int(torch.min(gt_boxes[:,1]))
                    max_y = int(torch.max(gt_boxes[:,3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            y_s_min = max(max_y-trim_size, 0)
                            y_s_max = min(min_y, data_height-trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((box_region-trim_size)/2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(range(min_y, min_y+y_s_add))
                    data = data[:, y_s:(y_s + trim_size), :, :]
                    # shift and clamp y coordinates of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)
                else:
                    # data_width >> data_height: crop the width
                    min_x = int(torch.min(gt_boxes[:,0]))
                    max_x = int(torch.max(gt_boxes[:,2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region-trim_size) < 0:
                            x_s_min = max(max_x-trim_size, 0)
                            x_s_max = min(min_x, data_width-trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region-trim_size)/2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(range(min_x, min_x+x_s_add))
                    data = data[:, :, x_s:(x_s + trim_size), :]
                    # shift and clamp x coordinates of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # pad the image to the batch's common aspect ratio
            if ratio < 1:
                trim_size = int(np.floor(data_width / ratio))
                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                 data_width, 3).zero_()
                padding_data[:data_height, :, :] = data[0]
                im_info[0, 0] = padding_data.size(0)
            elif ratio > 1:
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size
            # drop degenerate (zero-area) boxes
            not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
            keep = torch.nonzero(not_keep == 0).view(-1)
            # fixed-size gt tensor so samples can be batched
            gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
            if keep.numel() != 0 :
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0
            # HWC -> CHW for downstream processing
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)
            return padding_data, query, im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
            im_info = im_info.view(3)
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])
            choice = self.cat_list[index]
            return data, query, im_info, gt_boxes, choice

    def load_query(self, choice, id=0):
        """Return a preprocessed exemplar crop for a class (train) or
        deterministically for a test sample (seeded by image id)."""
        if self.training:
            all_data = self._query[choice]
            # resample until the exemplar box has non-zero width and height
            while True:
                data = random.choice(all_data)
                if int(data['boxes'][1]) == int(data['boxes'][3]) or int(data['boxes'][0]) == int(data['boxes'][2]):
                    continue
                else:
                    break
        else:
            catgory = self.cat_list[choice]
            all_data = self._query[catgory]
            # seed with the image id so the exemplar is reproducible per image
            random.seed(id)
            l = list(range(len(all_data)))
            random.shuffle(l)
            position=l[self.query_position%len(l)]
            data = all_data[position]
        path = data['image_path']
        im = imread(path)
        if len(im.shape) == 2:
            # grayscale -> replicate to 3 channels
            im = im[:,:,np.newaxis]
            im = np.concatenate((im,im,im), axis=2)
        im = crop(im, data['boxes'], cfg.TRAIN.query_size)
        # random horizontal flip augmentation (training only)
        if random.randint(0,99)/100 > 0.5 and self.training:
            im = im[:, ::-1, :]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TRAIN.query_size,
                                        cfg.TRAIN.MAX_SIZE)
        query = im_list_to_blob([im])
        return query

    def __len__(self):
        """Number of samples (one per ratio_index entry)."""
        return len(self.ratio_index)

    def filter(self, seen):
        """Select the class subset: seen(1), unseen(2) or all(3); a single
        group number splits classes by ``cat % 4``."""
        if seen==1:
            self.list = cfg.train_categories
            if len(self.list)==1:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1) if cat%4 != self.list[0]]
        elif seen==2:
            self.list = cfg.test_categories
            if len(self.list)==1:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1) if cat%4 == self.list[0]]
        elif seen==3:
            self.list = cfg.train_categories + cfg.test_categories
            if len(self.list)==2:
                self.list = [self._classes[cat] for cat in range(1,len(self._classes)+1)]
        self.list_ind = [self._classes_inv[x] for x in self.list]

    def probability(self):
        """Compute normalized inverse-frequency sampling weights per class."""
        show_time = {}
        for i in self.list_ind:
            show_time[i] = 0
        for roi in self._roidb:
            result = Counter(roi['gt_classes'])
            for t in result:
                if t in self.list_ind:
                    show_time[t] += result[t]
        # rarer classes get larger weights
        for i in self.list_ind:
            show_time[i] = 1/show_time[i]
        sum_prob = sum(show_time.values())
        for i in self.list_ind:
            show_time[i] = show_time[i]/sum_prob
        self.show_time = show_time
| true | true |
f7fc60173d989e7edf88fe61822fd317c7a6c21c | 20,488 | py | Python | custom_components/spacex/sensor.py | QziP22/HomeAssistantConfig | 6b05fa20c8267222dc66f90e94f03f5d865c57a9 | [
"Unlicense"
] | 136 | 2019-06-27T08:11:47.000Z | 2022-03-11T12:26:53.000Z | custom_components/spacex/sensor.py | QziP22/HomeAssistantConfig | 6b05fa20c8267222dc66f90e94f03f5d865c57a9 | [
"Unlicense"
] | 5 | 2020-05-30T00:19:22.000Z | 2022-03-25T18:49:47.000Z | custom_components/spacex/sensor.py | QziP22/HomeAssistantConfig | 6b05fa20c8267222dc66f90e94f03f5d865c57a9 | [
"Unlicense"
] | 63 | 2019-07-15T21:11:58.000Z | 2022-03-13T09:43:24.000Z | """Definition and setup of the SpaceX Binary Sensors for Home Assistant."""
import logging
import time
import datetime
from homeassistant.util.dt import as_local, utc_from_timestamp
from homeassistant.components.sensor import ENTITY_ID_FORMAT, DEVICE_CLASS_TIMESTAMP
from homeassistant.const import LENGTH_KILOMETERS, SPEED_KILOMETERS_PER_HOUR, ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from . import SpaceXUpdateCoordinator
from .const import ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, DOMAIN, COORDINATOR
_LOGGER = logging.getLogger(__name__)
# (friendly name, entity id suffix, icon, device identifier) for every
# sensor this integration exposes.  Order matches the original explicit
# append sequence, so entity registration order is unchanged.
SENSOR_DESCRIPTIONS = (
    ("Next Launch Mission", "spacex_next_launch_mission", "mdi:information-outline", "spacexlaunch"),
    ("Next Launch Day", "spacex_next_launch_day", "mdi:calendar", "spacexlaunch"),
    ("Next Launch Time", "spacex_next_launch_time", "mdi:clock-outline", "spacexlaunch"),
    ("Next Launch Countdown", "spacex_next_launch_countdown", "mdi:clock-outline", "spacexlaunch"),
    ("Next Launch Site", "spacex_next_launch_site", "mdi:map-marker", "spacexlaunch"),
    ("Next Launch Rocket", "spacex_next_launch_rocket", "mdi:rocket", "spacexlaunch"),
    ("Next Launch Payload", "spacex_next_launch_payload", "mdi:package", "spacexlaunch"),
    ("Next Confirmed Launch Day", "spacex_next_confirmed_launch_day", "mdi:calendar", "spacexlaunch"),
    ("Next Confirmed Launch Time", "spacex_next_confirmed_launch_time", "mdi:clock-outline", "spacexlaunch"),
    ("Latest Launch Mission", "spacex_latest_launch_mission", "mdi:information-outline", "spacexlaunch"),
    ("Latest Launch Day", "spacex_latest_launch_day", "mdi:calendar", "spacexlaunch"),
    ("Latest Launch Time", "spacex_latest_launch_time", "mdi:clock-outline", "spacexlaunch"),
    ("Latest Launch Site", "spacex_latest_launch_site", "mdi:map-marker", "spacexlaunch"),
    ("Latest Launch Rocket", "spacex_latest_launch_rocket", "mdi:rocket", "spacexlaunch"),
    ("Latest Launch Payload", "spacex_latest_launch_payload", "mdi:package", "spacexlaunch"),
    ("Starman Speed", "spacex_starman_speed", "mdi:account-star", "spacexstarman"),
    ("Starman Distance", "spacex_starman_distance", "mdi:map-marker-distance", "spacexstarman"),
)


async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the sensor platforms.

    Creates one SpaceXSensor per SENSOR_DESCRIPTIONS entry, all sharing the
    config entry's update coordinator.  (Refactor of 17 copy-pasted
    ``sensors.append(SpaceXSensor(...))`` blocks into a data-driven loop;
    the constructed sensors and their order are identical.)
    """
    coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]

    sensors = [
        SpaceXSensor(coordinator, name, entity_id, icon, device_identifier)
        for name, entity_id, icon, device_identifier in SENSOR_DESCRIPTIONS
    ]

    async_add_entities(sensors)
class SpaceXSensor(CoordinatorEntity):
    """Defines a SpaceX sensor driven by the shared update coordinator.

    One class serves every sensor kind; behavior branches on ``self._kind``
    in both ``device_state_attributes`` and ``state``.
    """

    def __init__(
        self,
        coordinator: SpaceXUpdateCoordinator,
        name: str,           # friendly entity name
        entity_id: str,      # entity id suffix, also selects behavior via _kind
        icon:str,            # mdi icon
        device_identifier:str,  # "spacexlaunch" or "spacexstarman" device
    ):
        """Initialize Entities."""
        super().__init__(coordinator=coordinator)
        self._name = name
        self._unique_id = f"spacex_{entity_id}"
        self._state = None
        self._icon = icon
        self._kind = entity_id
        self._device_identifier = device_identifier
        self._unit_of_measure = None
        self.attrs = {}

        # only the two Starman sensors have a unit of measurement
        if self._kind == "spacex_starman_speed":
            self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
        elif self._kind == "spacex_starman_distance":
            self._unit_of_measure = LENGTH_KILOMETERS

    @property
    def unique_id(self):
        """Return the unique Home Assistant friendly identifier for this entity."""
        return self._unique_id

    @property
    def name(self):
        """Return the friendly name of this entity."""
        return self._name

    @property
    def icon(self):
        """Return the icon for this entity."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement for this entity."""
        return self._unit_of_measure

    @property
    def device_state_attributes(self):
        """Return the extra attributes, computed per sensor kind."""
        coordinator_data = self.coordinator.data
        starman_data = coordinator_data["starman"]
        launch_data = coordinator_data["next_launch"]
        latest_launch_data = coordinator_data["latest_launch"]

        if self._kind == "spacex_next_launch_mission":
            self.attrs["mission_patch"] = launch_data["links"].get("patch",{}).get("large")
            if launch_data.get("details"):
                # HA attribute values are limited in size, so long mission
                # details are split across three 255-char attributes.
                self.attrs["details"] = launch_data["details"][0:255]
                if len(launch_data["details"]) > 255:
                    self.attrs["details2"] = launch_data["details"][255:510]
                else:
                    self.attrs["details2"] = ""

                if len(launch_data["details"]) > 510:
                    self.attrs["details3"] = launch_data["details"][510:765]
                else:
                    self.attrs["details3"] = ""

            self.attrs["video_link"] = launch_data["links"].get("webcast")

        elif self._kind == "spacex_next_launch_day":
            self.attrs["launch_date_unix"] = launch_data["date_unix"]
            self.attrs["launch_date_utc"] = launch_data["date_utc"]

        elif self._kind == "spacex_next_launch_time":
            # NOTE(review): chained assignment also overwrites self._state
            # as a side effect of reading attributes — looks intentional to
            # keep state and attribute in sync, but verify.
            self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
                launch_data["date_unix"]
            )).strftime("%H:%M")

        elif self._kind == "spacex_next_launch_countdown":
            if launch_data["tbd"]:
                self.attrs["t0_countdown"] = "NA"
            else:
                # break the remaining seconds into d/h/m/s and render a
                # human-readable countdown string
                t0_countdown = int(launch_data["date_unix"]) - int(time.time())
                day = t0_countdown // (24 * 3600)
                t0_countdown = t0_countdown % (24 * 3600)
                hour = t0_countdown // 3600
                t0_countdown %= 3600
                minutes = t0_countdown // 60
                t0_countdown %= 60
                seconds = t0_countdown
                countdown_string = ""
                if day > 0:
                    countdown_string = f"{day} days, "

                if hour > 0:
                    countdown_string = f"{countdown_string}{hour} hours, "

                if minutes > 0:
                    countdown_string = f"{countdown_string}{minutes} minutes, "

                countdown_string = f"{countdown_string}{seconds} seconds until the launch of {launch_data['name']}."

                self.attrs["t0_countdown"] = countdown_string

        elif self._kind == "spacex_next_confirmed_launch_day":
            if launch_data["tbd"]:
                self.attrs["launch_date_unix"] = "NA"
                self.attrs["launch_date_utc"] = "NA"
            else:
                self.attrs["launch_date_unix"] = launch_data["date_unix"]
                self.attrs["launch_date_utc"] = launch_data["date_utc"]

        elif self._kind == "spacex_next_confirmed_launch_time":
            # NOTE(review): unlike the matching state branch, this does not
            # check launch_data["tbd"] first — confirm whether that is
            # intentional.
            self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
                launch_data["date_unix"]
            )).strftime("%H:%M")

        elif self._kind == "spacex_next_launch_site":
            self.attrs["short_name"] = launch_data["launch_site"]["name"]

        elif self._kind == "spacex_next_launch_rocket":
            # expose per-core serial/block/flight/landing attributes
            core_counter = 1
            for this_core in launch_data["cores_detail"]:
                if this_core.get("details"):
                    self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"].get("serial")
                    self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"].get("block")

                self.attrs["core_" + str(core_counter) + "_flight"] = this_core.get(
                    "flight"
                )
                self.attrs[
                    "core_" + str(core_counter) + "_landing_intent"
                ] = this_core.get("landing_attempt")

                if this_core.get("landpad"):
                    self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
                        "name"
                    ]
                    self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
                        "full_name"
                    ]
                else:
                    self.attrs["core_" + str(core_counter) + "_lz"] = "NA"
                    self.attrs["core_" + str(core_counter) + "_lz_long"] = "NA"

                core_counter = core_counter + 1

            if launch_data.get("fairings"):
                self.attrs["fairings_reused"] = launch_data.get("fairings",{}).get(
                    "reused"
                )
            else:
                self.attrs["fairings_reused"] = "NA"

        elif self._kind == "spacex_next_launch_payload":
            if len(launch_data["payloads_detail"]):
                # only the first payload entry is surfaced
                if len(launch_data["payloads_detail"][0]["nationalities"]):
                    self.attrs["nationality"] = launch_data["payloads_detail"][0]["nationalities"][0]
                else:
                    self.attrs["nationality"] = "NA"

                if len(launch_data["payloads_detail"][0]["manufacturers"]):
                    self.attrs["manufacturer"] = launch_data["payloads_detail"][0]["manufacturers"][0]
                else:
                    self.attrs["manufacturer"] = "NA"

                self.attrs["payload_type"] = launch_data["payloads_detail"][0]["type"]

                self.attrs["payload_mass"] = (
                    str(
                        launch_data["payloads_detail"][0]["mass_kg"]
                    )
                    + " kg"
                )
                self.attrs["payload_mass_us"] = (
                    str(
                        launch_data["payloads_detail"][0]["mass_lbs"]
                    )
                    + " lbs"
                )
                self.attrs["orbit"] = launch_data["payloads_detail"][0]["orbit"]

        elif self._kind == "spacex_latest_launch_mission":
            self.attrs["mission_patch"] = latest_launch_data["links"].get("patch",{}).get("large")
            if latest_launch_data.get("details"):
                # same 255-char attribute splitting as the next-launch sensor
                self.attrs["details"] = latest_launch_data["details"][0:255]
                if len(latest_launch_data["details"]) > 255:
                    self.attrs["details2"] = latest_launch_data["details"][255:510]
                else:
                    self.attrs["details2"] = ""

                if len(latest_launch_data["details"]) > 510:
                    self.attrs["details3"] = latest_launch_data["details"][510:765]
                else:
                    self.attrs["details3"] = ""

            self.attrs["video_link"] = latest_launch_data["links"].get("webcast")

        elif self._kind == "spacex_latest_launch_day":
            self.attrs["launch_date_unix"] = latest_launch_data["date_unix"]
            self.attrs["launch_date_utc"] = latest_launch_data["date_utc"]

        elif self._kind == "spacex_latest_launch_time":
            self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
                latest_launch_data["date_unix"]
            )).strftime("%H:%M")

        elif self._kind == "spacex_latest_launch_site":
            self.attrs["short_name"] = latest_launch_data["launch_site"]["name"]

        elif self._kind == "spacex_latest_launch_rocket":
            # NOTE(review): unlike the next-launch branch this indexes
            # "details"/"landpad" without .get() guards — a past launch is
            # presumably always fully populated; confirm.
            core_counter = 1
            for this_core in latest_launch_data["cores_detail"]:
                self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"][
                    "serial"
                ]
                self.attrs["core_" + str(core_counter) + "_flight"] = this_core[
                    "flight"
                ]
                self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"][
                    "block"
                ]
                self.attrs[
                    "core_" + str(core_counter) + "_landing_intent"
                ] = this_core["landing_attempt"]

                self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
                    "name"
                ]
                self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
                    "full_name"
                ]

                core_counter = core_counter + 1

            if latest_launch_data.get("fairings"):
                self.attrs["fairings_reused"] = latest_launch_data["fairings"].get(
                    "reused"
                )

        elif self._kind == "spacex_latest_launch_payload":
            if len(latest_launch_data["payloads_detail"]):
                if len(latest_launch_data["payloads_detail"][0]["nationalities"]):
                    self.attrs["nationality"] = latest_launch_data["payloads_detail"][0]["nationalities"][0]
                else:
                    self.attrs["nationality"] = "NA"

                if len(latest_launch_data["payloads_detail"][0]["manufacturers"]):
                    self.attrs["manufacturer"] = latest_launch_data["payloads_detail"][0]["manufacturers"][0]
                else:
                    self.attrs["manufacturer"] = "NA"

                self.attrs["payload_type"] = latest_launch_data["payloads_detail"][0]["type"]

                self.attrs["payload_mass"] = (
                    str(
                        latest_launch_data["payloads_detail"][0]["mass_kg"]
                    )
                    + " kg"
                )
                self.attrs["payload_mass_us"] = (
                    str(
                        latest_launch_data["payloads_detail"][0]["mass_lbs"]
                    )
                    + " lbs"
                )
                self.attrs["orbit"] = latest_launch_data["payloads_detail"][0]["orbit"]

        elif self._kind == "spacex_starman_speed":
            # 1235 km/h ~ Mach 1 at sea level — presumably the intended
            # conversion; confirm.
            self.attrs["machspeed"] = float(starman_data["speed_kph"]) / 1235

        elif self._kind == "spacex_starman_distance":
            # 1.496e8 km = one astronomical unit
            self.attrs["au_distance"] = float(starman_data["earth_distance_km"]) / (1.496 * (10**8))

        return self.attrs

    @property
    def device_info(self):
        """Define the device based on device_identifier."""
        device_name = "SpaceX Launches"
        device_model = "Launch"

        if self._device_identifier != "spacexlaunch":
            device_name = "SpaceX Starman"
            device_model = "Starman"

        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._device_identifier)},
            ATTR_NAME: device_name,
            ATTR_MANUFACTURER: "SpaceX",
            ATTR_MODEL: device_model,
        }

    @property
    def state(self):
        """Return the state, computed per sensor kind from coordinator data."""
        coordinator_data = self.coordinator.data
        starman_data = coordinator_data["starman"]
        launch_data = coordinator_data["next_launch"]
        latest_launch_data = coordinator_data["latest_launch"]

        if self._kind == "spacex_next_launch_mission":
            self._state = launch_data["name"]

        elif self._kind == "spacex_next_launch_day":
            self._state = as_local(utc_from_timestamp(
                launch_data["date_unix"]
            )).strftime("%d-%b-%Y")

        elif self._kind == "spacex_next_launch_time":
            self._state = as_local(utc_from_timestamp(
                launch_data["date_unix"]
            )).strftime("%I:%M %p")

        elif self._kind == "spacex_next_launch_countdown":
            # unknown while the launch date is still "to be determined"
            if launch_data["tbd"]:
                self._state = None
            else:
                t0_countdown = int(launch_data["date_unix"]) - int(time.time())
                self._state = str(datetime.timedelta(seconds=t0_countdown))

        elif self._kind == "spacex_next_confirmed_launch_day":
            if launch_data["tbd"]:
                self._state = None
            else:
                self._state = as_local(utc_from_timestamp(
                    launch_data["date_unix"]
                )).strftime("%d-%b-%Y")

        elif self._kind == "spacex_next_confirmed_launch_time":
            if launch_data["tbd"]:
                self._state = None
            else:
                self._state = as_local(utc_from_timestamp(
                    launch_data["date_unix"]
                )).strftime("%I:%M %p")

        elif self._kind == "spacex_next_launch_site":
            self._state = launch_data["launch_site"]["full_name"]

        elif self._kind == "spacex_next_launch_rocket":
            self._state = launch_data["rocket"]["name"]

        elif self._kind == "spacex_next_launch_payload":
            self._state = launch_data["payloads_detail"][0]["name"]

        elif self._kind == "spacex_latest_launch_mission":
            self._state = latest_launch_data["name"]

        elif self._kind == "spacex_latest_launch_day":
            self._state = as_local(utc_from_timestamp(
                latest_launch_data["date_unix"]
            )).strftime("%d-%b-%Y")

        elif self._kind == "spacex_latest_launch_time":
            self._state = as_local(utc_from_timestamp(
                latest_launch_data["date_unix"]
            )).strftime("%I:%M %p")

        elif self._kind == "spacex_latest_launch_site":
            self._state = latest_launch_data["launch_site"]["full_name"]

        elif self._kind == "spacex_latest_launch_rocket":
            self._state = latest_launch_data["rocket"]["name"]

        elif self._kind == "spacex_latest_launch_payload":
            self._state = latest_launch_data["payloads_detail"][0]["name"]

        elif self._kind == "spacex_starman_speed":
            self._state = int(starman_data["speed_kph"])
            self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR

        elif self._kind == "spacex_starman_distance":
            self._state = int(starman_data["earth_distance_km"])
            self._unit_of_measure = LENGTH_KILOMETERS

        return self._state

    async def async_update(self):
        """Update SpaceX Binary Sensor Entity."""
        await self.coordinator.async_request_refresh()
        _LOGGER.debug("Updating state of the sensors.")

    async def async_added_to_hass(self):
        """Subscribe to updates."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )
| 34.608108 | 116 | 0.540951 |
import logging
import time
import datetime
from homeassistant.util.dt import as_local, utc_from_timestamp
from homeassistant.components.sensor import ENTITY_ID_FORMAT, DEVICE_CLASS_TIMESTAMP
from homeassistant.const import LENGTH_KILOMETERS, SPEED_KILOMETERS_PER_HOUR, ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from . import SpaceXUpdateCoordinator
from .const import ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, DOMAIN, COORDINATOR
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
sensors = []
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Mission",
"spacex_next_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Day",
"spacex_next_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Time",
"spacex_next_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Countdown",
"spacex_next_launch_countdown",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Site",
"spacex_next_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Rocket",
"spacex_next_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Payload",
"spacex_next_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Day",
"spacex_next_confirmed_launch_day",
"mdi:calendar",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Time",
"spacex_next_confirmed_launch_time",
"mdi:clock-outline",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Mission",
"spacex_latest_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Day",
"spacex_latest_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Time",
"spacex_latest_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Site",
"spacex_latest_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Rocket",
"spacex_latest_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Payload",
"spacex_latest_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Speed",
"spacex_starman_speed",
"mdi:account-star",
"spacexstarman",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Distance",
"spacex_starman_distance",
"mdi:map-marker-distance",
"spacexstarman",
)
)
async_add_entities(sensors)
class SpaceXSensor(CoordinatorEntity):
def __init__(
self,
coordinator: SpaceXUpdateCoordinator,
name: str,
entity_id: str,
icon:str,
device_identifier:str,
):
super().__init__(coordinator=coordinator)
self._name = name
self._unique_id = f"spacex_{entity_id}"
self._state = None
self._icon = icon
self._kind = entity_id
self._device_identifier = device_identifier
self._unit_of_measure = None
self.attrs = {}
if self._kind == "spacex_starman_speed":
self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
elif self._kind == "spacex_starman_distance":
self._unit_of_measure = LENGTH_KILOMETERS
@property
def unique_id(self):
return self._unique_id
@property
def name(self):
return self._name
@property
def icon(self):
return self._icon
@property
def unit_of_measurement(self):
return self._unit_of_measure
@property
def device_state_attributes(self):
coordinator_data = self.coordinator.data
starman_data = coordinator_data["starman"]
launch_data = coordinator_data["next_launch"]
latest_launch_data = coordinator_data["latest_launch"]
if self._kind == "spacex_next_launch_mission":
self.attrs["mission_patch"] = launch_data["links"].get("patch",{}).get("large")
if launch_data.get("details"):
self.attrs["details"] = launch_data["details"][0:255]
if len(launch_data["details"]) > 255:
self.attrs["details2"] = launch_data["details"][255:510]
else:
self.attrs["details2"] = ""
if len(launch_data["details"]) > 510:
self.attrs["details3"] = launch_data["details"][510:765]
else:
self.attrs["details3"] = ""
self.attrs["video_link"] = launch_data["links"].get("webcast")
elif self._kind == "spacex_next_launch_day":
self.attrs["launch_date_unix"] = launch_data["date_unix"]
self.attrs["launch_date_utc"] = launch_data["date_utc"]
elif self._kind == "spacex_next_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_next_launch_countdown":
if launch_data["tbd"]:
self.attrs["t0_countdown"] = "NA"
else:
t0_countdown = int(launch_data["date_unix"]) - int(time.time())
day = t0_countdown // (24 * 3600)
t0_countdown = t0_countdown % (24 * 3600)
hour = t0_countdown // 3600
t0_countdown %= 3600
minutes = t0_countdown // 60
t0_countdown %= 60
seconds = t0_countdown
countdown_string = ""
if day > 0:
countdown_string = f"{day} days, "
if hour > 0:
countdown_string = f"{countdown_string}{hour} hours, "
if minutes > 0:
countdown_string = f"{countdown_string}{minutes} minutes, "
countdown_string = f"{countdown_string}{seconds} seconds until the launch of {launch_data['name']}."
self.attrs["t0_countdown"] = countdown_string
elif self._kind == "spacex_next_confirmed_launch_day":
if launch_data["tbd"]:
self.attrs["launch_date_unix"] = "NA"
self.attrs["launch_date_utc"] = "NA"
else:
self.attrs["launch_date_unix"] = launch_data["date_unix"]
self.attrs["launch_date_utc"] = launch_data["date_utc"]
elif self._kind == "spacex_next_confirmed_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_next_launch_site":
self.attrs["short_name"] = launch_data["launch_site"]["name"]
elif self._kind == "spacex_next_launch_rocket":
core_counter = 1
for this_core in launch_data["cores_detail"]:
if this_core.get("details"):
self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"].get("serial")
self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"].get("block")
self.attrs["core_" + str(core_counter) + "_flight"] = this_core.get(
"flight"
)
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core.get("landing_attempt")
if this_core.get("landpad"):
self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
"name"
]
self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
"full_name"
]
else:
self.attrs["core_" + str(core_counter) + "_lz"] = "NA"
self.attrs["core_" + str(core_counter) + "_lz_long"] = "NA"
core_counter = core_counter + 1
if launch_data.get("fairings"):
self.attrs["fairings_reused"] = launch_data.get("fairings",{}).get(
"reused"
)
else:
self.attrs["fairings_reused"] = "NA"
elif self._kind == "spacex_next_launch_payload":
if len(launch_data["payloads_detail"]):
if len(launch_data["payloads_detail"][0]["nationalities"]):
self.attrs["nationality"] = launch_data["payloads_detail"][0]["nationalities"][0]
else:
self.attrs["nationality"] = "NA"
if len(launch_data["payloads_detail"][0]["manufacturers"]):
self.attrs["manufacturer"] = launch_data["payloads_detail"][0]["manufacturers"][0]
else:
self.attrs["manufacturer"] = "NA"
self.attrs["payload_type"] = launch_data["payloads_detail"][0]["type"]
self.attrs["payload_mass"] = (
str(
launch_data["payloads_detail"][0]["mass_kg"]
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
launch_data["payloads_detail"][0]["mass_lbs"]
)
+ " lbs"
)
self.attrs["orbit"] = launch_data["payloads_detail"][0]["orbit"]
elif self._kind == "spacex_latest_launch_mission":
self.attrs["mission_patch"] = latest_launch_data["links"].get("patch",{}).get("large")
if latest_launch_data.get("details"):
self.attrs["details"] = latest_launch_data["details"][0:255]
if len(latest_launch_data["details"]) > 255:
self.attrs["details2"] = latest_launch_data["details"][255:510]
else:
self.attrs["details2"] = ""
if len(latest_launch_data["details"]) > 510:
self.attrs["details3"] = latest_launch_data["details"][510:765]
else:
self.attrs["details3"] = ""
self.attrs["video_link"] = latest_launch_data["links"].get("webcast")
elif self._kind == "spacex_latest_launch_day":
self.attrs["launch_date_unix"] = latest_launch_data["date_unix"]
self.attrs["launch_date_utc"] = latest_launch_data["date_utc"]
elif self._kind == "spacex_latest_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_latest_launch_site":
self.attrs["short_name"] = latest_launch_data["launch_site"]["name"]
elif self._kind == "spacex_latest_launch_rocket":
core_counter = 1
for this_core in latest_launch_data["cores_detail"]:
self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"][
"serial"
]
self.attrs["core_" + str(core_counter) + "_flight"] = this_core[
"flight"
]
self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"][
"block"
]
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core["landing_attempt"]
self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
"name"
]
self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
"full_name"
]
core_counter = core_counter + 1
if latest_launch_data.get("fairings"):
self.attrs["fairings_reused"] = latest_launch_data["fairings"].get(
"reused"
)
elif self._kind == "spacex_latest_launch_payload":
if len(latest_launch_data["payloads_detail"]):
if len(latest_launch_data["payloads_detail"][0]["nationalities"]):
self.attrs["nationality"] = latest_launch_data["payloads_detail"][0]["nationalities"][0]
else:
self.attrs["nationality"] = "NA"
if len(latest_launch_data["payloads_detail"][0]["manufacturers"]):
self.attrs["manufacturer"] = latest_launch_data["payloads_detail"][0]["manufacturers"][0]
else:
self.attrs["manufacturer"] = "NA"
self.attrs["payload_type"] = latest_launch_data["payloads_detail"][0]["type"]
self.attrs["payload_mass"] = (
str(
latest_launch_data["payloads_detail"][0]["mass_kg"]
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
latest_launch_data["payloads_detail"][0]["mass_lbs"]
)
+ " lbs"
)
self.attrs["orbit"] = latest_launch_data["payloads_detail"][0]["orbit"]
elif self._kind == "spacex_starman_speed":
self.attrs["machspeed"] = float(starman_data["speed_kph"]) / 1235
elif self._kind == "spacex_starman_distance":
self.attrs["au_distance"] = float(starman_data["earth_distance_km"]) / (1.496 * (10**8))
return self.attrs
@property
def device_info(self):
device_name = "SpaceX Launches"
device_model = "Launch"
if self._device_identifier != "spacexlaunch":
device_name = "SpaceX Starman"
device_model = "Starman"
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_identifier)},
ATTR_NAME: device_name,
ATTR_MANUFACTURER: "SpaceX",
ATTR_MODEL: device_model,
}
@property
def state(self):
coordinator_data = self.coordinator.data
starman_data = coordinator_data["starman"]
launch_data = coordinator_data["next_launch"]
latest_launch_data = coordinator_data["latest_launch"]
if self._kind == "spacex_next_launch_mission":
self._state = launch_data["name"]
elif self._kind == "spacex_next_launch_day":
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_next_launch_time":
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_next_launch_countdown":
if launch_data["tbd"]:
self._state = None
else:
t0_countdown = int(launch_data["date_unix"]) - int(time.time())
self._state = str(datetime.timedelta(seconds=t0_countdown))
elif self._kind == "spacex_next_confirmed_launch_day":
if launch_data["tbd"]:
self._state = None
else:
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_next_confirmed_launch_time":
if launch_data["tbd"]:
self._state = None
else:
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_next_launch_site":
self._state = launch_data["launch_site"]["full_name"]
elif self._kind == "spacex_next_launch_rocket":
self._state = launch_data["rocket"]["name"]
elif self._kind == "spacex_next_launch_payload":
self._state = launch_data["payloads_detail"][0]["name"]
elif self._kind == "spacex_latest_launch_mission":
self._state = latest_launch_data["name"]
elif self._kind == "spacex_latest_launch_day":
self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_latest_launch_time":
self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_latest_launch_site":
self._state = latest_launch_data["launch_site"]["full_name"]
elif self._kind == "spacex_latest_launch_rocket":
self._state = latest_launch_data["rocket"]["name"]
elif self._kind == "spacex_latest_launch_payload":
self._state = latest_launch_data["payloads_detail"][0]["name"]
elif self._kind == "spacex_starman_speed":
self._state = int(starman_data["speed_kph"])
self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
elif self._kind == "spacex_starman_distance":
self._state = int(starman_data["earth_distance_km"])
self._unit_of_measure = LENGTH_KILOMETERS
return self._state
async def async_update(self):
await self.coordinator.async_request_refresh()
_LOGGER.debug("Updating state of the sensors.")
async def async_added_to_hass(self):
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
| true | true |
f7fc62d633477d6d3ece9ba91ce54f2bc62a7f27 | 7,646 | py | Python | sdk/python/pulumi_azure_native/web/v20201001/get_web_app_source_control_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20201001/get_web_app_source_control_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20201001/get_web_app_source_control_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebAppSourceControlSlotResult',
'AwaitableGetWebAppSourceControlSlotResult',
'get_web_app_source_control_slot',
]
@pulumi.output_type
class GetWebAppSourceControlSlotResult:
"""
Source control configuration for an app.
"""
def __init__(__self__, branch=None, deployment_rollback_enabled=None, id=None, is_git_hub_action=None, is_manual_integration=None, is_mercurial=None, kind=None, name=None, repo_url=None, system_data=None, type=None):
if branch and not isinstance(branch, str):
raise TypeError("Expected argument 'branch' to be a str")
pulumi.set(__self__, "branch", branch)
if deployment_rollback_enabled and not isinstance(deployment_rollback_enabled, bool):
raise TypeError("Expected argument 'deployment_rollback_enabled' to be a bool")
pulumi.set(__self__, "deployment_rollback_enabled", deployment_rollback_enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_git_hub_action and not isinstance(is_git_hub_action, bool):
raise TypeError("Expected argument 'is_git_hub_action' to be a bool")
pulumi.set(__self__, "is_git_hub_action", is_git_hub_action)
if is_manual_integration and not isinstance(is_manual_integration, bool):
raise TypeError("Expected argument 'is_manual_integration' to be a bool")
pulumi.set(__self__, "is_manual_integration", is_manual_integration)
if is_mercurial and not isinstance(is_mercurial, bool):
raise TypeError("Expected argument 'is_mercurial' to be a bool")
pulumi.set(__self__, "is_mercurial", is_mercurial)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if repo_url and not isinstance(repo_url, str):
raise TypeError("Expected argument 'repo_url' to be a str")
pulumi.set(__self__, "repo_url", repo_url)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def branch(self) -> Optional[str]:
"""
Name of branch to use for deployment.
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="deploymentRollbackEnabled")
def deployment_rollback_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable deployment rollback; otherwise, <code>false</code>.
"""
return pulumi.get(self, "deployment_rollback_enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isGitHubAction")
def is_git_hub_action(self) -> Optional[bool]:
"""
<code>true</code> if this is deployed via GitHub action.
"""
return pulumi.get(self, "is_git_hub_action")
@property
@pulumi.getter(name="isManualIntegration")
def is_manual_integration(self) -> Optional[bool]:
"""
<code>true</code> to limit to manual integration; <code>false</code> to enable continuous integration (which configures webhooks into online repos like GitHub).
"""
return pulumi.get(self, "is_manual_integration")
@property
@pulumi.getter(name="isMercurial")
def is_mercurial(self) -> Optional[bool]:
"""
<code>true</code> for a Mercurial repository; <code>false</code> for a Git repository.
"""
return pulumi.get(self, "is_mercurial")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="repoUrl")
def repo_url(self) -> Optional[str]:
"""
Repository or source control URL.
"""
return pulumi.get(self, "repo_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppSourceControlSlotResult(GetWebAppSourceControlSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSourceControlSlotResult(
branch=self.branch,
deployment_rollback_enabled=self.deployment_rollback_enabled,
id=self.id,
is_git_hub_action=self.is_git_hub_action,
is_manual_integration=self.is_manual_integration,
is_mercurial=self.is_mercurial,
kind=self.kind,
name=self.name,
repo_url=self.repo_url,
system_data=self.system_data,
type=self.type)
def get_web_app_source_control_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSourceControlSlotResult:
"""
Source control configuration for an app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get the source control configuration for the production slot.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20201001:getWebAppSourceControlSlot', __args__, opts=opts, typ=GetWebAppSourceControlSlotResult).value
return AwaitableGetWebAppSourceControlSlotResult(
branch=__ret__.branch,
deployment_rollback_enabled=__ret__.deployment_rollback_enabled,
id=__ret__.id,
is_git_hub_action=__ret__.is_git_hub_action,
is_manual_integration=__ret__.is_manual_integration,
is_mercurial=__ret__.is_mercurial,
kind=__ret__.kind,
name=__ret__.name,
repo_url=__ret__.repo_url,
system_data=__ret__.system_data,
type=__ret__.type)
| 38.23 | 220 | 0.654852 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebAppSourceControlSlotResult',
'AwaitableGetWebAppSourceControlSlotResult',
'get_web_app_source_control_slot',
]
@pulumi.output_type
class GetWebAppSourceControlSlotResult:
def __init__(__self__, branch=None, deployment_rollback_enabled=None, id=None, is_git_hub_action=None, is_manual_integration=None, is_mercurial=None, kind=None, name=None, repo_url=None, system_data=None, type=None):
if branch and not isinstance(branch, str):
raise TypeError("Expected argument 'branch' to be a str")
pulumi.set(__self__, "branch", branch)
if deployment_rollback_enabled and not isinstance(deployment_rollback_enabled, bool):
raise TypeError("Expected argument 'deployment_rollback_enabled' to be a bool")
pulumi.set(__self__, "deployment_rollback_enabled", deployment_rollback_enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_git_hub_action and not isinstance(is_git_hub_action, bool):
raise TypeError("Expected argument 'is_git_hub_action' to be a bool")
pulumi.set(__self__, "is_git_hub_action", is_git_hub_action)
if is_manual_integration and not isinstance(is_manual_integration, bool):
raise TypeError("Expected argument 'is_manual_integration' to be a bool")
pulumi.set(__self__, "is_manual_integration", is_manual_integration)
if is_mercurial and not isinstance(is_mercurial, bool):
raise TypeError("Expected argument 'is_mercurial' to be a bool")
pulumi.set(__self__, "is_mercurial", is_mercurial)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if repo_url and not isinstance(repo_url, str):
raise TypeError("Expected argument 'repo_url' to be a str")
pulumi.set(__self__, "repo_url", repo_url)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def branch(self) -> Optional[str]:
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="deploymentRollbackEnabled")
def deployment_rollback_enabled(self) -> Optional[bool]:
return pulumi.get(self, "deployment_rollback_enabled")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isGitHubAction")
def is_git_hub_action(self) -> Optional[bool]:
return pulumi.get(self, "is_git_hub_action")
@property
@pulumi.getter(name="isManualIntegration")
def is_manual_integration(self) -> Optional[bool]:
return pulumi.get(self, "is_manual_integration")
@property
@pulumi.getter(name="isMercurial")
def is_mercurial(self) -> Optional[bool]:
return pulumi.get(self, "is_mercurial")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="repoUrl")
def repo_url(self) -> Optional[str]:
return pulumi.get(self, "repo_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetWebAppSourceControlSlotResult(GetWebAppSourceControlSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSourceControlSlotResult(
branch=self.branch,
deployment_rollback_enabled=self.deployment_rollback_enabled,
id=self.id,
is_git_hub_action=self.is_git_hub_action,
is_manual_integration=self.is_manual_integration,
is_mercurial=self.is_mercurial,
kind=self.kind,
name=self.name,
repo_url=self.repo_url,
system_data=self.system_data,
type=self.type)
def get_web_app_source_control_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSourceControlSlotResult:
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20201001:getWebAppSourceControlSlot', __args__, opts=opts, typ=GetWebAppSourceControlSlotResult).value
return AwaitableGetWebAppSourceControlSlotResult(
branch=__ret__.branch,
deployment_rollback_enabled=__ret__.deployment_rollback_enabled,
id=__ret__.id,
is_git_hub_action=__ret__.is_git_hub_action,
is_manual_integration=__ret__.is_manual_integration,
is_mercurial=__ret__.is_mercurial,
kind=__ret__.kind,
name=__ret__.name,
repo_url=__ret__.repo_url,
system_data=__ret__.system_data,
type=__ret__.type)
| true | true |
f7fc63f7e8ee91a7e9ad6fbd19fb602bc0c5b147 | 10,770 | py | Python | pdil/_core/math.py | Mikfr83/fossil | 5bba97e6ea05bc59a76dae7f6ea11d09fe09e3bf | [
"BSD-3-Clause"
] | null | null | null | pdil/_core/math.py | Mikfr83/fossil | 5bba97e6ea05bc59a76dae7f6ea11d09fe09e3bf | [
"BSD-3-Clause"
] | null | null | null | pdil/_core/math.py | Mikfr83/fossil | 5bba97e6ea05bc59a76dae7f6ea11d09fe09e3bf | [
"BSD-3-Clause"
] | null | null | null | '''
Utilities to deal with math nodes with ease so it feel more like writing
equations as well as a few other things.
All regular math operations functions (`add`, `divide`, etc.) take two params
which can either be numbers, plugs or mixed and returns the plug of the output.
Examples are better:
add(someCube.tx, 3) >> somePlane.ry
ratio = divide( someCurve.length, obj.originalSize )
multiple( ratio, 3 ) >> someObj.sx
'''
from __future__ import print_function, absolute_import
import ast
import inspect
import math
import numbers
from pymel.core import *
class VECTOR: pass # noqa
class _VECTOR_ATTR(VECTOR): pass # noqa
class _VECTOR_NUMBER(VECTOR): pass # noqa
VECTOR_ATTR = _VECTOR_ATTR() # noqa
VECTOR_NUMBER = _VECTOR_NUMBER() # noqa
class _NUMBER: pass # noqa
class _NUMBER_ATTR: pass # noqa
NUMBER = _NUMBER() # noqa
NUMBER_ATTR = _NUMBER_ATTR() # noqa
def _assignInput( plug, input):
if isinstance( input, numbers.Number ):
plug.set( input )
else:
input >> plug
def _assignVectorInput(plug, x, y, z, valueType, value):
''' plug is compound, x/y/z are the components.
'''
if isinstance(valueType, _VECTOR_NUMBER):
plug.set(value)
elif isinstance(valueType, _VECTOR_ATTR):
value >> plug
elif isinstance(valueType, _NUMBER):
plug.set(value, value, value)
else: # Otherwise it's a scalar plug
value >> x
value >> y
value >> z
def getType(data):
try:
if len(data) == 3:
return VECTOR_NUMBER
except TypeError:
pass
if isinstance(data, Attribute):
if data.type().endswith('3'): # double3 and float3 are
return VECTOR_ATTR
else:
return NUMBER_ATTR
return NUMBER
def add( a, b, operation=1, name='add' ):
''' Add two number, vectors or combo of the two as direct inputs or plugs. Returning the appropriate plug
Ex
add( objA.t, objB.t ) >> objC.t
add( objA.tx, objB.tx ) >> objC.tx
add( objA.tx, (1, 2 ,3) ) >> objC.tx
add( objA.t, 5 ) >> objC.t # Since the other input is a vector, converts 5 to (5, 5, 5)
'''
node = createNode('plusMinusAverage')
node.operation.set( operation )
node.rename(name)
aType = getType(a)
bType = getType(b)
#print(bType, type(bType), isinstance(bType, VECTOR))
if isinstance(aType, VECTOR) or isinstance(bType, VECTOR):
leftPlug = node.input3D[0]
_assignVectorInput(leftPlug, leftPlug.input3Dx, leftPlug.input3Dy, leftPlug.input3Dz, aType, a)
rightPlug = node.input3D[1]
_assignVectorInput(rightPlug, rightPlug.input3Dx, rightPlug.input3Dy, rightPlug.input3Dz, bType, b)
'''
if isinstance(aType, _VECTOR_NUMBER):
node.input3D[0].set(a)
elif isinstance(aType, _VECTOR_ATTR):
a >> node.input3D[0]
elif isinstance(aType, _NUMBER):
node.input3D[0].set(a, a, a)
else:
a >> node.input3D[0].input3Dx
a >> node.input3D[0].input3Dy
a >> node.input3D[0].input3Dz
if isinstance(bType, _VECTOR_NUMBER):
node.input3D[1].set(b)
elif isinstance(bType, _VECTOR_ATTR):
b >> node.input3D[1]
elif isinstance(bType, _NUMBER):
node.input3D[1].set(b, b, b)
else:
b >> node.input3D[1].input3Dx
b >> node.input3D[1].input3Dy
b >> node.input3D[1].input3Dz
'''
return node.output3D
else:
_assignInput( node.input1D[0], a )
_assignInput( node.input1D[1], b )
return node.output1D
def sub( a, b ):
return add(a, b, operation=2, name='minus')
'''
node = createNode('plusMinusAverage')
node.operation.set( 2 )
_assignInput( node.input1D[0], a )
_assignInput( node.input1D[1], b )
node.rename('minus')
return node.output1D
'''
def multiply( left, right, operation=1, name='mult' ):
node = createNode('multiplyDivide')
node.operation.set( operation )
node.rename( name )
leftType = getType(left)
rightType = getType(right)
if isinstance(leftType, VECTOR) or isinstance(rightType, VECTOR):
_assignVectorInput(node.input1,
node.input1X,
node.input1Y,
node.input1Z,
leftType,
left)
_assignVectorInput(node.input2,
node.input2X,
node.input2Y,
node.input2Z,
rightType,
right)
return node.output
else:
_assignInput( node.input1X, left )
_assignInput( node.input2X, right )
return node.outputX
'''
if isinstance(a, (tuple, list)):
_assignInput( node.input1X, a[0] )
_assignInput( node.input1Y, a[1] )
_assignInput( node.input1Z, a[2] )
else:
_assignInput( node.input1X, a )
if isinstance(b, (tuple, list)):
_assignInput( node.input2X, b[0] )
_assignInput( node.input2Y, b[1] )
_assignInput( node.input2Z, b[2] )
else:
_assignInput( node.input2X, b )
'''
def divide(left, right):
return multiply(left, right, operation=2, name='div')
'''
def divide( a, b ):
node = createNode('multiplyDivide')
node.operation.set( 2 )
_assignInput( node.input1X, a )
_assignInput( node.input2X, b )
node.rename('div')
return node.outputX
'''
def opposite( a ):
'''
Calculates 1-value
'''
#return add(1, a, operation=2, name='opposite')
node = createNode( 'plusMinusAverage' )
node.operation.set( 2 )
node.input1D[0].set( 1 )
_assignInput( node.input1D[1], a )
node.rename('opposite')
return node.output1D
def condition( a, symbol, b, true=1, false=0 ):
'''
Takes 2 input values and string of the condition (for readability), and
values if the condition is true or false (defaults to 1 and 0 respectively)
'''
mapping = {
'=': (0, 'EQ'), # noqa e241
'!=': (1, 'NE'),
'>': (2, 'GT'), # noqa e241
'>=': (3, 'GE'),
'<': (4, 'LT'), # noqa e241
'<=': (5, 'LE'), }
node = createNode( 'condition' )
node.operation.set( mapping[symbol][0] )
_assignInput( node.firstTerm, a )
_assignInput( node.secondTerm, b )
_assignInput( node.colorIfTrueR, true )
_assignInput( node.colorIfFalseR, false )
node.rename( mapping[symbol][1] )
return node.outColorR
def isCloseF(a, b, tolerance=0.001):
# isClose for a single float instead of a vector.
return (abs(a - b) < tolerance)
def isClose(a, b, tolerance=0.001):
'''
Return True if each axis of the given vector/3 element list is with a
tolerance (default to 0.001). Mainly to resist float error.
'''
if ( abs(a[0] - b[0]) < tolerance
and abs(a[1] - b[1]) < tolerance
and abs(a[2] - b[2]) < tolerance ): # noqa e125
return True
return False
def clampNode(lower, upper):
clampNode = createNode('clamp')
clampNode.min.set(lower)
clampNode.max.set(upper)
return clampNode
def clamp(driver, lower, upper):
clampNode = createNode('clamp')
driverType = getType(driver)
lowerType = getType(lower)
upperType = getType(upper)
_assignVectorInput(clampNode.input,
clampNode.inputR, clampNode.inputG, clampNode.inputB,
driverType,
driver)
_assignVectorInput(clampNode.min,
clampNode.min.inputR, clampNode.min.inputG, clampNode.min.inputB,
lowerType,
lower)
_assignVectorInput(clampNode.max,
clampNode.max.inputR, clampNode.max.inputG, clampNode.max.inputB,
upperType,
upper)
if isinstance(driverType, VECTOR) or isinstance(lowerType, VECTOR) or isinstance(upperType, VECTOR):
return clampNode.output
else:
return clampNode.outputR
def eulerFromMatrix( matrix, degrees=False ):
'''
Returns the euler rotation from a matrix, optionally in degrees.
'''
easy = matrix[0][2]
if isCloseF(easy, 1, 0.000000000000001):
z = math.pi
y = -math.pi / 2.0
x = -z + math.atan2( -matrix[1][0], -matrix[2][0] )
elif isCloseF(easy, -1, 0.000000000000001):
z = math.pi
y = math.pi / 2.0
x = z + math.atan2( matrix[1][0], matrix[2][0] )
else:
y = -math.asin( easy )
cosY = math.cos( y )
x = math.atan2( matrix[1][2] * cosY, matrix[2][2] * cosY )
z = math.atan2( matrix[0][1] * cosY, matrix[0][0] * cosY )
angles = x, y, z
if degrees:
return map( math.degrees, angles )
return angles
binop = {
ast.Add: add,
ast.Sub: sub,
ast.Mult: multiply,
ast.Div: divide,
}
def parse(s, objs=None):
''' Takes a mathematical expression using the variables defined in the calling scope.
*NOTE* Vector * Vector is piece-wise, like addition, since to allow for minimizing nodes.
Ex `parse('cube.t * 3 + (1, 2, 3)') >> otherCube.t` The calling scope must
have PyNode('cube'), each element being multiplied by 3, then adding
vector(1, 2, 3).
'''
lookup = {}
frame = inspect.currentframe()
if objs:
lookup.update(objs)
lookup.update( frame.f_back.f_globals )
lookup.update( frame.f_back.f_locals )
temp = ast.parse(s.strip())
return process( temp.body[0].value, lookup )
def process(node, objs):
if isinstance(node, ast.BinOp):
return binop[ type(node.op) ](
process(node.left, objs),
process(node.right, objs)
)
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Attribute):
return objs[node.value.id].attr( node.attr )
elif isinstance(node, ast.Name):
return objs[node.id]
#elif isinstance(node, ast.Str):
# if '.' in node.s:
# obj, attr = node.s.split('.')
# return objs[obj].attr(attr)
# else:
# return objs[node.s]
elif isinstance(node, ast.Call):
if node.func.id == 'clamp':
return clamp(
process(node.args[0], objs),
process(node.args[1], objs),
process(node.args[2], objs)
)
elif isinstance(node, ast.Tuple):
return [n.n for n in node.elts] | 25.951807 | 110 | 0.578273 | from __future__ import print_function, absolute_import
import ast
import inspect
import math
import numbers
from pymel.core import *
class VECTOR: pass
class _VECTOR_ATTR(VECTOR): pass
class _VECTOR_NUMBER(VECTOR): pass
VECTOR_ATTR = _VECTOR_ATTR()
VECTOR_NUMBER = _VECTOR_NUMBER()
class _NUMBER: pass
class _NUMBER_ATTR: pass
NUMBER = _NUMBER()
NUMBER_ATTR = _NUMBER_ATTR()
def _assignInput( plug, input):
if isinstance( input, numbers.Number ):
plug.set( input )
else:
input >> plug
def _assignVectorInput(plug, x, y, z, valueType, value):
if isinstance(valueType, _VECTOR_NUMBER):
plug.set(value)
elif isinstance(valueType, _VECTOR_ATTR):
value >> plug
elif isinstance(valueType, _NUMBER):
plug.set(value, value, value)
else:
value >> x
value >> y
value >> z
def getType(data):
try:
if len(data) == 3:
return VECTOR_NUMBER
except TypeError:
pass
if isinstance(data, Attribute):
if data.type().endswith('3'): # double3 and float3 are
return VECTOR_ATTR
else:
return NUMBER_ATTR
return NUMBER
def add( a, b, operation=1, name='add' ):
node = createNode('plusMinusAverage')
node.operation.set( operation )
node.rename(name)
aType = getType(a)
bType = getType(b)
#print(bType, type(bType), isinstance(bType, VECTOR))
if isinstance(aType, VECTOR) or isinstance(bType, VECTOR):
leftPlug = node.input3D[0]
_assignVectorInput(leftPlug, leftPlug.input3Dx, leftPlug.input3Dy, leftPlug.input3Dz, aType, a)
rightPlug = node.input3D[1]
_assignVectorInput(rightPlug, rightPlug.input3Dx, rightPlug.input3Dy, rightPlug.input3Dz, bType, b)
return node.output3D
else:
_assignInput( node.input1D[0], a )
_assignInput( node.input1D[1], b )
return node.output1D
def sub( a, b ):
return add(a, b, operation=2, name='minus')
def multiply( left, right, operation=1, name='mult' ):
node = createNode('multiplyDivide')
node.operation.set( operation )
node.rename( name )
leftType = getType(left)
rightType = getType(right)
if isinstance(leftType, VECTOR) or isinstance(rightType, VECTOR):
_assignVectorInput(node.input1,
node.input1X,
node.input1Y,
node.input1Z,
leftType,
left)
_assignVectorInput(node.input2,
node.input2X,
node.input2Y,
node.input2Z,
rightType,
right)
return node.output
else:
_assignInput( node.input1X, left )
_assignInput( node.input2X, right )
return node.outputX
def divide(left, right):
return multiply(left, right, operation=2, name='div')
def opposite( a ):
#return add(1, a, operation=2, name='opposite')
node = createNode( 'plusMinusAverage' )
node.operation.set( 2 )
node.input1D[0].set( 1 )
_assignInput( node.input1D[1], a )
node.rename('opposite')
return node.output1D
def condition( a, symbol, b, true=1, false=0 ):
mapping = {
'=': (0, 'EQ'), # noqa e241
'!=': (1, 'NE'),
'>': (2, 'GT'), # noqa e241
'>=': (3, 'GE'),
'<': (4, 'LT'), # noqa e241
'<=': (5, 'LE'), }
node = createNode( 'condition' )
node.operation.set( mapping[symbol][0] )
_assignInput( node.firstTerm, a )
_assignInput( node.secondTerm, b )
_assignInput( node.colorIfTrueR, true )
_assignInput( node.colorIfFalseR, false )
node.rename( mapping[symbol][1] )
return node.outColorR
def isCloseF(a, b, tolerance=0.001):
# isClose for a single float instead of a vector.
return (abs(a - b) < tolerance)
def isClose(a, b, tolerance=0.001):
if ( abs(a[0] - b[0]) < tolerance
and abs(a[1] - b[1]) < tolerance
and abs(a[2] - b[2]) < tolerance ): # noqa e125
return True
return False
def clampNode(lower, upper):
clampNode = createNode('clamp')
clampNode.min.set(lower)
clampNode.max.set(upper)
return clampNode
def clamp(driver, lower, upper):
clampNode = createNode('clamp')
driverType = getType(driver)
lowerType = getType(lower)
upperType = getType(upper)
_assignVectorInput(clampNode.input,
clampNode.inputR, clampNode.inputG, clampNode.inputB,
driverType,
driver)
_assignVectorInput(clampNode.min,
clampNode.min.inputR, clampNode.min.inputG, clampNode.min.inputB,
lowerType,
lower)
_assignVectorInput(clampNode.max,
clampNode.max.inputR, clampNode.max.inputG, clampNode.max.inputB,
upperType,
upper)
if isinstance(driverType, VECTOR) or isinstance(lowerType, VECTOR) or isinstance(upperType, VECTOR):
return clampNode.output
else:
return clampNode.outputR
def eulerFromMatrix( matrix, degrees=False ):
easy = matrix[0][2]
if isCloseF(easy, 1, 0.000000000000001):
z = math.pi
y = -math.pi / 2.0
x = -z + math.atan2( -matrix[1][0], -matrix[2][0] )
elif isCloseF(easy, -1, 0.000000000000001):
z = math.pi
y = math.pi / 2.0
x = z + math.atan2( matrix[1][0], matrix[2][0] )
else:
y = -math.asin( easy )
cosY = math.cos( y )
x = math.atan2( matrix[1][2] * cosY, matrix[2][2] * cosY )
z = math.atan2( matrix[0][1] * cosY, matrix[0][0] * cosY )
angles = x, y, z
if degrees:
return map( math.degrees, angles )
return angles
binop = {
ast.Add: add,
ast.Sub: sub,
ast.Mult: multiply,
ast.Div: divide,
}
def parse(s, objs=None):
lookup = {}
frame = inspect.currentframe()
if objs:
lookup.update(objs)
lookup.update( frame.f_back.f_globals )
lookup.update( frame.f_back.f_locals )
temp = ast.parse(s.strip())
return process( temp.body[0].value, lookup )
def process(node, objs):
if isinstance(node, ast.BinOp):
return binop[ type(node.op) ](
process(node.left, objs),
process(node.right, objs)
)
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Attribute):
return objs[node.value.id].attr( node.attr )
elif isinstance(node, ast.Name):
return objs[node.id]
#elif isinstance(node, ast.Str):
# if '.' in node.s:
# obj, attr = node.s.split('.')
# return objs[obj].attr(attr)
# else:
# return objs[node.s]
elif isinstance(node, ast.Call):
if node.func.id == 'clamp':
return clamp(
process(node.args[0], objs),
process(node.args[1], objs),
process(node.args[2], objs)
)
elif isinstance(node, ast.Tuple):
return [n.n for n in node.elts] | true | true |
f7fc6594baba7e7e616e0caf25f35439aa83c89e | 112 | py | Python | init_classes/repost_first.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | [
"MIT"
] | 7 | 2017-08-19T22:36:29.000Z | 2018-06-03T07:02:04.000Z | init_classes/repost_first.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | [
"MIT"
] | 2 | 2021-04-30T20:37:14.000Z | 2021-12-13T19:46:29.000Z | init_classes/repost_first.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | [
"MIT"
] | 4 | 2017-09-06T03:00:11.000Z | 2017-12-10T08:04:21.000Z | # coding:utf-8
from celery_tasks.weibo import repost
if __name__ == '__main__':
repost.excute_repost_task() | 22.4 | 37 | 0.758929 |
from celery_tasks.weibo import repost
if __name__ == '__main__':
repost.excute_repost_task() | true | true |
f7fc6670dc680e9f1455e31ca758e0e10a321e53 | 2,918 | py | Python | models/visual7w_attention_model.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 72 | 2017-04-12T17:07:36.000Z | 2021-06-18T08:20:47.000Z | models/visual7w_attention_model.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 8 | 2017-07-06T04:24:04.000Z | 2020-09-17T10:29:44.000Z | models/visual7w_attention_model.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 21 | 2017-04-19T07:38:09.000Z | 2021-02-28T13:39:22.000Z | from models import modules, fastrcnn_vgg_net, lstm_net
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
def visual7w_attbilstm_net(input_batch, bbox_batch1, spatial_batch1,
bbox_batch2, spatial_batch2, expr_obj, num_vocab, embed_dim, lstm_dim,
vgg_dropout, lstm_dropout):
# a sentence is parsed into [expr_obj1, expr_relation, expr_obj2]
# bbox_batch1 has shape [N_batch*N1, 5] and
# spatial_batch1 has shape [N_batch, N1, D_spatial] and
# bbox_batch2 has shape [N2, 5] and
# spatial_batch2 has shape [1, N2, D_spatial] and
# expr_obj has shape [T, N_batch]
# where N1 is the number of choices (= 4 in Visual 7W) and
# N2 is the number of proposals (~ 300 for RPN in Faster RCNN)
N_batch = tf.shape(spatial_batch1)[0]
N1 = tf.shape(spatial_batch1)[1]
N2 = tf.shape(spatial_batch2)[1]
# Extract visual features
vis_feat1 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,
tf.reshape(bbox_batch1, [-1, 5]), "vgg_local",
apply_dropout=vgg_dropout)
D_vis = vis_feat1.get_shape().as_list()[-1]
vis_feat1 = tf.reshape(vis_feat1, to_T([N_batch, N1, D_vis]))
vis_feat1.set_shape([None, None, D_vis])
# Reshape and tile vis_feat2 and spatial_batch2
vis_feat2 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,
tf.reshape(bbox_batch2, [-1, 5]), "vgg_local",
apply_dropout=vgg_dropout, reuse=True)
vis_feat2 = tf.reshape(vis_feat2, to_T([1, N2, D_vis]))
vis_feat2 = tf.tile(vis_feat2, to_T([N_batch, 1, 1]))
vis_feat2.set_shape([None, None, D_vis])
spatial_batch2 = tf.tile(spatial_batch2, to_T([N_batch, 1, 1]))
# Extract representation using attention
lang_obj1, lang_obj2, lang_relation = lstm_net.attbilstm(
expr_obj, "lstm", num_vocab=num_vocab, embed_dim=embed_dim,
lstm_dim=lstm_dim, apply_dropout=lstm_dropout)
# Score for each bounding box matching the first object
# scores_obj1 has shape [N_batch, N1, 1]
scores_obj1 = modules.localization_module_batch_score(vis_feat1,
spatial_batch1, lang_obj1)
# Score for each bounding box matching the second object
# scores_obj2 has shape [N_batch, N2, 1]
scores_obj2 = modules.localization_module_batch_score(vis_feat2,
spatial_batch2, lang_obj2, reuse=True)
# Scores for each pair of bounding box matching the relationship
# Tile the scores by broadcasting add
# scores_rel has shape [N_batch, N1, N2, 1]
scores_rel = modules.relationship_module_spatial_only_batch_score(
spatial_batch1, scores_obj1, spatial_batch2, scores_obj2, lang_relation,
rescale_scores=True)
# marginal_scores has shape [N_batch, N1, 1]
tf.add_to_collection("s_pair", scores_rel)
marginal_scores = tf.reduce_max(scores_rel, reduction_indices=2)
final_scores = tf.reshape(marginal_scores, to_T([N_batch, -1]))
return final_scores
| 44.212121 | 80 | 0.719671 | from models import modules, fastrcnn_vgg_net, lstm_net
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
def visual7w_attbilstm_net(input_batch, bbox_batch1, spatial_batch1,
bbox_batch2, spatial_batch2, expr_obj, num_vocab, embed_dim, lstm_dim,
vgg_dropout, lstm_dropout):
N_batch = tf.shape(spatial_batch1)[0]
N1 = tf.shape(spatial_batch1)[1]
N2 = tf.shape(spatial_batch2)[1]
vis_feat1 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,
tf.reshape(bbox_batch1, [-1, 5]), "vgg_local",
apply_dropout=vgg_dropout)
D_vis = vis_feat1.get_shape().as_list()[-1]
vis_feat1 = tf.reshape(vis_feat1, to_T([N_batch, N1, D_vis]))
vis_feat1.set_shape([None, None, D_vis])
vis_feat2 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,
tf.reshape(bbox_batch2, [-1, 5]), "vgg_local",
apply_dropout=vgg_dropout, reuse=True)
vis_feat2 = tf.reshape(vis_feat2, to_T([1, N2, D_vis]))
vis_feat2 = tf.tile(vis_feat2, to_T([N_batch, 1, 1]))
vis_feat2.set_shape([None, None, D_vis])
spatial_batch2 = tf.tile(spatial_batch2, to_T([N_batch, 1, 1]))
lang_obj1, lang_obj2, lang_relation = lstm_net.attbilstm(
expr_obj, "lstm", num_vocab=num_vocab, embed_dim=embed_dim,
lstm_dim=lstm_dim, apply_dropout=lstm_dropout)
scores_obj1 = modules.localization_module_batch_score(vis_feat1,
spatial_batch1, lang_obj1)
scores_obj2 = modules.localization_module_batch_score(vis_feat2,
spatial_batch2, lang_obj2, reuse=True)
scores_rel = modules.relationship_module_spatial_only_batch_score(
spatial_batch1, scores_obj1, spatial_batch2, scores_obj2, lang_relation,
rescale_scores=True)
tf.add_to_collection("s_pair", scores_rel)
marginal_scores = tf.reduce_max(scores_rel, reduction_indices=2)
final_scores = tf.reshape(marginal_scores, to_T([N_batch, -1]))
return final_scores
| true | true |
f7fc66a4a6bddc05971a524df8faeb0b86622937 | 15,988 | py | Python | cime/scripts/lib/CIME/case/case_run.py | apcraig/E3SM | 61479712b58efe14e1c906818aaa7381d077e227 | [
"zlib-acknowledgement",
"RSA-MD",
"FTL"
] | 1 | 2020-10-16T07:24:21.000Z | 2020-10-16T07:24:21.000Z | cime/scripts/lib/CIME/case/case_run.py | apcraig/E3SM | 61479712b58efe14e1c906818aaa7381d077e227 | [
"zlib-acknowledgement",
"RSA-MD",
"FTL"
] | 3 | 2019-04-25T18:18:33.000Z | 2019-05-07T16:43:36.000Z | cime/scripts/lib/CIME/case/case_run.py | apcraig/E3SM | 61479712b58efe14e1c906818aaa7381d077e227 | [
"zlib-acknowledgement",
"RSA-MD",
"FTL"
] | 1 | 2020-03-02T21:55:25.000Z | 2020-03-02T21:55:25.000Z | """
case_run is a member of Class Case
'"""
from CIME.XML.standard_module_setup import *
from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status
from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log
from CIME.get_timing import get_timing
from CIME.provenance import save_prerun_provenance, save_postrun_provenance
import shutil, time, sys, os, glob
logger = logging.getLogger(__name__)
###############################################################################
def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0):
###############################################################################
# Pre run initialization code..
if da_cycle > 0:
case.create_namelists(component='cpl')
return
caseroot = case.get_value("CASEROOT")
din_loc_root = case.get_value("DIN_LOC_ROOT")
rundir = case.get_value("RUNDIR")
build_complete = case.get_value("BUILD_COMPLETE")
if case.get_value("TESTCASE") == "PFS":
env_mach_pes = os.path.join(caseroot,"env_mach_pes.xml")
safe_copy(env_mach_pes,"{}.{}".format(env_mach_pes, lid))
# check for locked files.
case.check_lockedfiles()
logger.debug("check_lockedfiles OK")
# check that build is done
expect(build_complete,
"BUILD_COMPLETE is not true\nPlease rebuild the model interactively")
logger.debug("build complete is {} ".format(build_complete))
# load the module environment...
case.load_env(reset=True)
# create the timing directories, optionally cleaning them if needed.
if os.path.isdir(os.path.join(rundir, "timing")):
shutil.rmtree(os.path.join(rundir, "timing"))
os.makedirs(os.path.join(rundir, "timing", "checkpoints"))
# This needs to be done everytime the LID changes in order for log files to be set up correctly
# The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml
# variable while the job is in the queue
model_log("e3sm", logger, "{} NAMELIST CREATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if skip_pnl:
case.create_namelists(component='cpl')
else:
logger.info("Generating namelists for {}".format(caseroot))
case.create_namelists()
model_log("e3sm", logger, "{} NAMELIST CREATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
logger.info("-------------------------------------------------------------------------")
logger.info(" - Prestage required restarts into {}".format(rundir))
logger.info(" - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root))
logger.info(" - Checking for required input datasets in DIN_LOC_ROOT")
logger.info("-------------------------------------------------------------------------")
###############################################################################
def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0):
###############################################################################
model_log("e3sm", logger, "{} PRE_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
_pre_run_check(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle)
model_log("e3sm", logger, "{} PRE_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
model = case.get_value("MODEL")
# Set OMP_NUM_THREADS
os.environ["OMP_NUM_THREADS"] = str(case.thread_count)
# Run the model
cmd = case.get_mpirun_cmd(allow_unresolved_envvars=False)
logger.info("run command is {} ".format(cmd))
rundir = case.get_value("RUNDIR")
loop = True
# MPIRUN_RETRY_REGEX allows the mpi command to be reattempted if the
# failure described by that regular expression is matched in the model log
# case.spare_nodes is overloaded and may also represent the number of
# retries to attempt if ALLOCATE_SPARE_NODES is False
retry_run_re = case.get_value("MPIRUN_RETRY_REGEX")
node_fail_re = case.get_value("NODE_FAIL_REGEX")
retry_count = 0
if retry_run_re:
retry_run_regex = re.compile(re.escape(retry_run_re))
retry_count = case.get_value("MPIRUN_RETRY_COUNT")
if node_fail_re:
node_fail_regex = re.compile(re.escape(node_fail_re))
while loop:
loop = False
model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
save_prerun_provenance(case)
model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
model_log("e3sm", logger, "{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
run_func = lambda: run_cmd(cmd, from_dir=rundir)[0]
stat = run_and_log_case_status(run_func, "model execution", caseroot=case.get_value("CASEROOT"))
model_log("e3sm", logger, "{} MODEL EXECUTION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
model_logfile = os.path.join(rundir, model + ".log." + lid)
# Determine if failure was due to a failed node, if so, try to restart
if retry_run_re or node_fail_re:
model_logfile = os.path.join(rundir, model + ".log." + lid)
if os.path.exists(model_logfile):
num_node_fails=0
num_retry_fails=0
if node_fail_re:
num_node_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read()))
if retry_run_re:
num_retry_fails = len(retry_run_regex.findall(open(model_logfile, 'r').read()))
logger.debug ("RETRY: num_retry_fails {} spare_nodes {} retry_count {}".
format(num_retry_fails, case.spare_nodes, retry_count))
if num_node_fails > 0 and case.spare_nodes >= num_node_fails:
# We failed due to node failure!
logger.warning("Detected model run failed due to node failure, restarting")
case.spare_nodes -= num_node_fails
loop = True
case.set_value("CONTINUE_RUN",
case.get_value("RESUBMIT_SETS_CONTINUE_RUN"))
elif num_retry_fails > 0 and retry_count >= num_retry_fails:
logger.warning("Detected model run failed, restarting")
retry_count -= 1
loop = True
if loop:
# Archive the last consistent set of restart files and restore them
if case.get_value("DOUT_S"):
case.case_st_archive(resubmit=False)
case.restore_from_archive()
lid = new_lid()
case.create_namelists()
if stat != 0 and not loop:
# We failed and we're not restarting
expect(False, "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format(cmd, model_logfile))
model_log("e3sm", logger, "{} POST_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
_post_run_check(case, lid)
model_log("e3sm", logger, "{} POST_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
return lid
###############################################################################
def _run_model(case, lid, skip_pnl=False, da_cycle=0):
###############################################################################
functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle)
return run_and_log_case_status(functor, "case.run", caseroot=case.get_value("CASEROOT"))
###############################################################################
def _post_run_check(case, lid):
###############################################################################
rundir = case.get_value("RUNDIR")
model = case.get_value("MODEL")
driver = case.get_value("COMP_INTERFACE")
if driver == 'nuopc':
file_prefix = 'med'
else:
file_prefix = 'cpl'
cpl_ninst = 1
if case.get_value("MULTI_DRIVER"):
cpl_ninst = case.get_value("NINST_MAX")
cpl_logs = []
if cpl_ninst > 1:
for inst in range(cpl_ninst):
cpl_logs.append(os.path.join(rundir, file_prefix + "_%04d.log." % (inst+1) + lid))
else:
cpl_logs = [os.path.join(rundir, file_prefix + ".log." + lid)]
cpl_logfile = cpl_logs[0]
# find the last model.log and cpl.log
model_logfile = os.path.join(rundir, model + ".log." + lid)
if not os.path.isfile(model_logfile):
expect(False, "Model did not complete, no {} log file ".format(model_logfile))
elif os.stat(model_logfile).st_size == 0:
expect(False, "Run FAILED")
else:
count_ok = 0
for cpl_logfile in cpl_logs:
if not os.path.isfile(cpl_logfile):
break
with open(cpl_logfile, 'r') as fd:
if 'SUCCESSFUL TERMINATION' in fd.read():
count_ok += 1
if count_ok != cpl_ninst:
expect(False, "Model did not complete - see {} \n " .format(cpl_logfile))
###############################################################################
def _save_logs(case, lid):
###############################################################################
rundir = case.get_value("RUNDIR")
logfiles = glob.glob(os.path.join(rundir, "*.log.{}".format(lid)))
for logfile in logfiles:
if os.path.isfile(logfile):
gzip_existing_file(logfile)
######################################################################################
def _resubmit_check(case):
###############################################################################
# check to see if we need to do resubmission from this particular job,
# Note that Mira requires special logic
dout_s = case.get_value("DOUT_S")
logger.warning("dout_s {} ".format(dout_s))
mach = case.get_value("MACH")
logger.warning("mach {} ".format(mach))
resubmit_num = case.get_value("RESUBMIT")
logger.warning("resubmit_num {}".format(resubmit_num))
# If dout_s is True than short-term archiving handles the resubmit
# If dout_s is True and machine is mira submit the st_archive script
resubmit = False
if not dout_s and resubmit_num > 0:
resubmit = True
elif dout_s and mach == 'mira':
caseroot = case.get_value("CASEROOT")
cimeroot = case.get_value("CIMEROOT")
cmd = "ssh cooleylogin1 'cd {case}; CIMEROOT={root} ./case.submit {case} --job case.st_archive'".format(case=caseroot, root=cimeroot)
run_cmd(cmd, verbose=True)
if resubmit:
job = case.get_primary_job()
case.submit(job=job, resubmit=True)
###############################################################################
def _do_external(script_name, caseroot, rundir, lid, prefix):
###############################################################################
expect(os.path.isfile(script_name), "External script {} not found".format(script_name))
filename = "{}.external.log.{}".format(prefix, lid)
outfile = os.path.join(rundir, filename)
append_status("Starting script {}".format(script_name), "CaseStatus")
run_sub_or_cmd(script_name, [caseroot], (os.path.basename(script_name).split('.',1))[0], [caseroot], logfile=outfile) # For sub, use case?
append_status("Completed script {}".format(script_name), "CaseStatus")
###############################################################################
def _do_data_assimilation(da_script, caseroot, cycle, lid, rundir):
###############################################################################
expect(os.path.isfile(da_script), "Data Assimilation script {} not found".format(da_script))
filename = "da.log.{}".format(lid)
outfile = os.path.join(rundir, filename)
run_sub_or_cmd(da_script, [caseroot, cycle], os.path.basename(da_script), [caseroot, cycle], logfile=outfile) # For sub, use case?
###############################################################################
def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=False):
###############################################################################
model_log("e3sm", logger, "{} CASE.RUN BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
# Set up the run, run the model, do the postrun steps
prerun_script = self.get_value("PRERUN_SCRIPT")
postrun_script = self.get_value("POSTRUN_SCRIPT")
data_assimilation_cycles = self.get_value("DATA_ASSIMILATION_CYCLES")
data_assimilation_script = self.get_value("DATA_ASSIMILATION_SCRIPT")
data_assimilation = (data_assimilation_cycles > 0 and
len(data_assimilation_script) > 0 and
os.path.isfile(data_assimilation_script))
# set up the LID
lid = new_lid()
if prerun_script:
model_log("e3sm", logger, "{} PRERUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
self.flush()
_do_external(prerun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"),
lid, prefix="prerun")
self.read_xml()
model_log("e3sm", logger, "{} PRERUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
for cycle in range(data_assimilation_cycles):
# After the first DA cycle, runs are restart runs
if cycle > 0:
lid = new_lid()
self.set_value("CONTINUE_RUN",
self.get_value("RESUBMIT_SETS_CONTINUE_RUN"))
model_log("e3sm", logger, "{} RUN_MODEL BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
lid = _run_model(self, lid, skip_pnl, da_cycle=cycle)
model_log("e3sm", logger, "{} RUN_MODEL HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if self.get_value("CHECK_TIMING") or self.get_value("SAVE_TIMING"):
model_log("e3sm", logger, "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
get_timing(self, lid) # Run the getTiming script
model_log("e3sm", logger, "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if data_assimilation:
model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
self.flush()
_do_data_assimilation(data_assimilation_script, self.get_value("CASEROOT"), cycle, lid,
self.get_value("RUNDIR"))
self.read_xml()
model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
_save_logs(self, lid) # Copy log files back to caseroot
model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
save_postrun_provenance(self)
model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if postrun_script:
model_log("e3sm", logger, "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
self.flush()
_do_external(postrun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"),
lid, prefix="postrun")
self.read_xml()
_save_logs(self, lid)
model_log("e3sm", logger, "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if set_continue_run:
self.set_value("CONTINUE_RUN",
self.get_value("RESUBMIT_SETS_CONTINUE_RUN"))
logger.warning("check for resubmit")
if submit_resubmits:
_resubmit_check(self)
model_log("e3sm", logger, "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
return True
| 47.301775 | 142 | 0.575557 | from CIME.XML.standard_module_setup import *
from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status
from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log
from CIME.get_timing import get_timing
from CIME.provenance import save_prerun_provenance, save_postrun_provenance
import shutil, time, sys, os, glob
logger = logging.getLogger(__name__)
):
model_log("e3sm", logger, "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
get_timing(self, lid) # Run the getTiming script
model_log("e3sm", logger, "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if data_assimilation:
model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
self.flush()
_do_data_assimilation(data_assimilation_script, self.get_value("CASEROOT"), cycle, lid,
self.get_value("RUNDIR"))
self.read_xml()
model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
_save_logs(self, lid) # Copy log files back to caseroot
model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
save_postrun_provenance(self)
model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if postrun_script:
model_log("e3sm", logger, "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")))
self.flush()
_do_external(postrun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"),
lid, prefix="postrun")
self.read_xml()
_save_logs(self, lid)
model_log("e3sm", logger, "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
if set_continue_run:
self.set_value("CONTINUE_RUN",
self.get_value("RESUBMIT_SETS_CONTINUE_RUN"))
logger.warning("check for resubmit")
if submit_resubmits:
_resubmit_check(self)
model_log("e3sm", logger, "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")))
return True
| true | true |
f7fc673eb617a6d182d67356cefed3b69a0c2fda | 1,102 | py | Python | python/src/nnabla/backward_function/reshape.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 2,792 | 2017-06-26T13:05:44.000Z | 2022-03-28T07:55:26.000Z | python/src/nnabla/backward_function/reshape.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 138 | 2017-06-27T07:04:44.000Z | 2022-02-28T01:37:15.000Z | python/src/nnabla/backward_function/reshape.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 380 | 2017-06-26T13:23:52.000Z | 2022-03-25T16:51:30.000Z | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
def reshape_backward(inputs, shape, inplace=True):
    """Backward pass of the Reshape function.

    Args:
        inputs (list of nn.Variable): ``[grad_output, forward_input]`` — the
            incoming gradient followed by the input of the forward function.
        shape (tuple of int): Target shape of the forward reshape. Unused here;
            the gradient is simply reshaped back to the forward input's shape.
        inplace (bool): Forwarded to ``F.reshape``.

    Returns:
        nn.Variable: Gradient with respect to the forward input.
    """
    grad_out, forward_in = inputs[0], inputs[1]
    # The gradient of a reshape is just the incoming gradient reshaped back
    # to the original input shape.
    return F.reshape(grad_out, forward_in.shape, inplace=inplace)
| 33.393939 | 86 | 0.72686 |
import nnabla.functions as F
def reshape_backward(inputs, shape, inplace=True):
dy = inputs[0]
x0 = inputs[1]
x0_shape = x0.shape
dx0 = F.reshape(dy, x0_shape, inplace=inplace)
return dx0
| true | true |
f7fc6791f440d5a1abb097672ea3935ce3129e62 | 6,094 | py | Python | scripts/revredirs.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | 4 | 2016-05-14T17:42:03.000Z | 2018-09-24T18:43:03.000Z | scripts/revredirs.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | null | null | null | scripts/revredirs.py | Facenapalm/NapalmBot | ce775a270f374e626bcabc313676e4e2f9dbb843 | [
"MIT"
] | 1 | 2021-05-08T15:45:30.000Z | 2021-05-08T15:45:30.000Z | """This script automatically reviews obvious redirects in ruwiki."""
import re
import pywikibot
from pywikibot.data.api import Request
def get_review_token(site):
    """Fetch a CSRF token usable for review actions on the given site."""
    tokens = site.get_tokens(["csrf"])
    return tokens["csrf"]
def review(site, token, page):
    """Mark the latest revision of ``page`` as reviewed via the API."""
    Request(site=site,
            action="review",
            token=token,
            revid=page.latest_revision_id).submit()
def get_unreviewed_redirects(site, namespace="0"):
    """Get a list of the unreviewed redirects with their targets.

    Queries the MediaWiki API with ``generator=unreviewedpages`` filtered to
    redirects, collecting ``prop=links`` for every page. Only redirects with
    exactly one outgoing link are kept, that link being taken as the target.

    Parameters
    ----------
    site : pywikibot site
        Site used to build API requests.
    namespace : str, default "0"
        Namespace number passed as ``gurnamespace``.

    Returns
    -------
    list of (str, str)
        (redirect title, target title) pairs.
    """
    result = []

    def _submit_and_parse(request):
        """Divide the answer to the list of values and continue info."""
        answer = request.submit()
        if "pages" not in answer["query"]:
            return ([], {})
        values = list(answer["query"]["pages"].values())
        if "query-continue" in answer:
            contin = answer["query-continue"]
        else:
            contin = {}
        return (values, contin)

    kwargs = {
        "action": "query",
        "prop": "links",
        "pllimit": "5000",
        "generator": "unreviewedpages",
        "gurnamespace": namespace,
        "gurfilterredir": "redirects",
        "gurlimit": "5000"
    }
    while True:
        # iterate for gurstart, get list of redirects
        request = Request(site=site, **kwargs)
        (values, contin) = _submit_and_parse(request)
        chunk = [{"title": value["title"], "links": []} for value in values]
        while True:
            # iterate for plcontinue, get list of links (ie target candidates)
            for key, value in enumerate(values):
                if "links" in value:
                    # assumes continuation responses list pages in the same
                    # order, so index `key` lines up with `chunk` — TODO confirm
                    chunk[key]["links"] += [links["title"] for links in value["links"]]
            if "links" in contin:
                # mutate the existing Request mapping in place to ask for the
                # next page of links
                request["plcontinue"] = contin["links"]["plcontinue"]
                (values, contin) = _submit_and_parse(request)
                continue
            else:
                break
        result += chunk
        if "unreviewedpages" in contin:
            kwargs["gurstart"] = contin["unreviewedpages"]["gurstart"]
            continue
        else:
            break
    # filter result: redirects with two or more links aren't any interesting for us
    result = [(x["title"], x["links"][0]) for x in filter(lambda x: len(x["links"]) == 1, result)]
    return result
def primary_check(redirect, target):
    """Decide whether a redirect→target pair is obviously legitimate.

    Accepts typo-helper redirects (е/ё, apostrophe, quote and dash variants),
    redirects to titles that only add a disambiguation suffix, and the usual
    name permutations for person articles ("Surname, Name Patronymic" etc.).
    """
    def compare(left, right):
        """True when ``left`` equals ``right`` up to the typo-helper character
        substitutions (е→ё, '→’, "→«»„“, -→—); lengths must match exactly."""
        replaces = {
            "е": "ё",
            "'": "’",
            "\"": "«»„“",
            "-": "—"
        }
        if len(left) != len(right):
            return False
        return all(
            lchar == rchar or (lchar in replaces and rchar in replaces[lchar])
            for lchar, rchar in zip(left, right)
        )

    # Redirects that merely help people type the title.
    if compare(redirect, target):
        return True

    # Redirect to a page whose title carries a "(disambiguation)" suffix.
    disamb = re.match(r"^([^\(\)]+) \([^\(\)]+\)$", target)
    if disamb and redirect == disamb.group(1):
        return True

    # Person redirects: for each recognised target form, try the usual
    # permutations of surname / name / patronymic.
    person_forms = [
        (r"^([а-яё\-]+), ([а-яё]+) ([а-яё]{5,})$", [
            "{surname} {name} {fathername}",
            "{name} {fathername} {surname}",
            "{surname}, {name}",
            "{surname} {name}",
            "{name} {surname}",
            "{surname}",
        ]),
        (r"^([а-яё\-]+), ([а-яё]+)$", [
            "{surname} {name}",
            "{name} {surname}",
            "{surname}",
        ]),
        (r"^([а-яё\-]+), ([а-яё ]+ (?:фон|де|оглы))$", [
            "{name} {surname}",
            "{surname}",
        ]),
    ]
    for pattern, candidates in person_forms:
        match = re.match(pattern, target, flags=re.I)
        if not match:
            continue
        groups = match.groups()
        parts = {"surname": groups[0], "name": groups[1]}
        if len(groups) > 2:
            parts["fathername"] = groups[2]
        for candidate in candidates:
            if compare(redirect, candidate.format(**parts)):
                return True
    return False
def secondary_check(rpage, tpage):
    """
    Check that the redirect has no implicit problems: every revision of the
    redirect page must always have been a pure redirect (i.e. it is not an
    article later replaced by a redirect), and the target must be an existing
    non-redirect page in the same namespace.
    """
    redirect_re = re.compile(
        r"^\s*#(redirect|перенаправление)\s*:?\s*\[\[[^\[\]\|\n]+\]\]\s*$",
        flags=re.I)
    for revision in rpage.revisions(content=True):
        if not redirect_re.match(revision["text"]):
            return False
    return (tpage.exists()
            and not tpage.isRedirectPage()
            and rpage.namespace().id == tpage.namespace().id)
def main():
    """Review every unreviewed redirect that passes both checks."""
    site = pywikibot.Site()
    candidates = [pair for pair in get_unreviewed_redirects(site)
                  if primary_check(pair[0], pair[1])]
    pywikibot.output("List loaded")
    token = get_review_token(site)
    for redirect, target in candidates:
        rpage = pywikibot.Page(site, redirect)
        tpage = pywikibot.Page(site, target)
        if secondary_check(rpage, tpage):
            review(site, token, rpage)
            pywikibot.output("[[{}]] → [[{}]]: reviewed".format(redirect, target))
        else:
            pywikibot.output("[[{}]] → [[{}]]: secondary check failed".format(redirect, target))


if __name__ == "__main__":
    main()
| 33.855556 | 104 | 0.541844 | import re
import pywikibot
from pywikibot.data.api import Request
def get_review_token(site):
return site.get_tokens(["csrf"])["csrf"]
def review(site, token, page):
revid = page.latest_revision_id
request = Request(site=site,
action="review",
token=token,
revid=revid)
request.submit()
def get_unreviewed_redirects(site, namespace="0"):
result = []
def _submit_and_parse(request):
answer = request.submit()
if "pages" not in answer["query"]:
return ([], {})
values = list(answer["query"]["pages"].values())
if "query-continue" in answer:
contin = answer["query-continue"]
else:
contin = {}
return (values, contin)
kwargs = {
"action": "query",
"prop": "links",
"pllimit": "5000",
"generator": "unreviewedpages",
"gurnamespace": namespace,
"gurfilterredir": "redirects",
"gurlimit": "5000"
}
while True:
request = Request(site=site, **kwargs)
(values, contin) = _submit_and_parse(request)
chunk = [{"title": value["title"], "links": []} for value in values]
while True:
for key, value in enumerate(values):
if "links" in value:
chunk[key]["links"] += [links["title"] for links in value["links"]]
if "links" in contin:
request["plcontinue"] = contin["links"]["plcontinue"]
(values, contin) = _submit_and_parse(request)
continue
else:
break
result += chunk
if "unreviewedpages" in contin:
kwargs["gurstart"] = contin["unreviewedpages"]["gurstart"]
continue
else:
break
result = [(x["title"], x["links"][0]) for x in filter(lambda x: len(x["links"]) == 1, result)]
return result
def primary_check(redirect, target):
def compare(redirect, target):
replaces = {
"е": "ё",
"'": "’",
"\"": "«»„“",
"-": "—"
}
if len(redirect) != len(target):
return False
for idx, rchar in enumerate(redirect):
tchar = target[idx]
if rchar == tchar:
continue
if rchar in replaces:
if tchar in replaces[rchar]:
continue
return False
return True
# redirects that help people typing page titles
if compare(redirect, target):
return True
# redirects to pages with disambiguations
match = re.match(r"^([^\(\)]+) \([^\(\)]+\)$", target)
if match:
if redirect == match.group(1):
return True
# persones redirect
match = re.match(r"^([а-яё\-]+), ([а-яё]+) ([а-яё]{5,})$", target, flags=re.I)
if match:
candidates = [
"{surname} {name} {fathername}",
"{name} {fathername} {surname}",
"{surname}, {name}",
"{surname} {name}",
"{name} {surname}",
"{surname}"
]
for candidate in candidates:
if compare(redirect, candidate.format(surname=match.group(1), name=match.group(2),
fathername=match.group(3))):
return True
match = re.match(r"^([а-яё\-]+), ([а-яё]+)$", target, flags=re.I)
if match:
candidates = [
"{surname} {name}",
"{name} {surname}",
"{surname}"
]
for candidate in candidates:
if compare(redirect, candidate.format(surname=match.group(1), name=match.group(2))):
return True
match = re.match(r"^([а-яё\-]+), ([а-яё ]+ (?:фон|де|оглы))$", target, flags=re.I)
if match:
candidates = [
"{name} {surname}",
"{surname}"
]
for candidate in candidates:
if compare(redirect, candidate.format(surname=match.group(1), name=match.group(2))):
return True
return False
def secondary_check(rpage, tpage):
matcher = re.compile(r"^\s*
for revision in rpage.revisions(content=True):
if not matcher.match(revision["text"]):
return False
if not tpage.exists():
return False
if tpage.isRedirectPage():
return False
if rpage.namespace().id != tpage.namespace().id:
return False
return True
def main():
site = pywikibot.Site()
lst = filter(lambda x: primary_check(x[0], x[1]), get_unreviewed_redirects(site))
pywikibot.output("List loaded")
token = get_review_token(site)
for redirect, target in lst:
rpage = pywikibot.Page(site, redirect)
tpage = pywikibot.Page(site, target)
if not secondary_check(rpage, tpage):
pywikibot.output("[[{}]] → [[{}]]: secondary check failed".format(redirect, target))
continue
review(site, token, rpage)
pywikibot.output("[[{}]] → [[{}]]: reviewed".format(redirect, target))
if __name__ == "__main__":
main()
| true | true |
f7fc685b8192cfcf4f40e99374acadfaaa852b48 | 10,441 | py | Python | src/vs_definitions.py | DIAGNijmegen/adhesion_detection | 21a9c810a4dee3c640d31f30ee5fdff1bbce9146 | [
"Apache-2.0"
] | 2 | 2021-10-08T13:14:49.000Z | 2022-03-18T17:53:45.000Z | src/vs_definitions.py | DIAGNijmegen/adhesion_detection | 21a9c810a4dee3c640d31f30ee5fdff1bbce9146 | [
"Apache-2.0"
] | 6 | 2021-10-12T20:55:53.000Z | 2021-10-12T21:03:45.000Z | src/vs_definitions.py | DIAGNijmegen/adhesion_detection | 21a9c810a4dee3c640d31f30ee5fdff1bbce9146 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from pathlib import Path
from enum import Enum, unique
from cinemri.contour import Contour
from config import *
@unique
class VSExpectationNormType(Enum):
    """How to normalise visceral slide values with their expected statistics."""
    # Division by mean
    mean_div = 0
    # Standardize with std (subtract mean, divide by standard deviation)
    standardize = 1
# Transformation to apply to visceral slide values
@unique
class VSTransform(Enum):
    """Optional value transform applied to visceral slide values."""
    none = 0
    log = 1
    sqrt = 2
class Region:
    """A set of contour coordinates with a value attached to each coordinate.

    Optionally carries summary statistics (mean/std) of the values.

    Attributes
    ----------
    x, y : ndarray of int
        Coordinates of the region's points.
    values : ndarray of float
        The value at each coordinate.
    mean, std : float or None
        Optional summary statistics of the region's values.
    points : ndarray, shape (N, 3)
        Cached stack of (x, y, value) rows; rebuilt after every mutation.
    """

    def __init__(self, x, y, values, mean=None, std=None):
        """
        Parameters
        ----------
        x, y : list of int or int
            The coordinates of the region
        values : list of float or float
            The values that correspond to coordinates
        mean : float, optional
            A mean of the region
        std : float, optional
            A standard deviation of the region
        """
        self.x = np.atleast_1d(x)
        self.y = np.atleast_1d(y)
        self.values = np.atleast_1d(values)
        self.mean = mean
        self.std = std
        self._sync_points()

    def _sync_points(self):
        """Rebuild the cached (x, y, value) point matrix after a mutation."""
        self.points = np.column_stack((self.x, self.y, self.values))

    @classmethod
    def from_point(cls, point):
        """
        Initialise a Region from a single point.

        Parameters
        ----------
        point : tuple
            (x, y, value) - the coordinates of the point and the value at this point

        Returns
        -------
        region : Region
            A region that consists of a single point
        """
        x, y, value = point[0], point[1], point[2]
        return cls(x, y, value)

    @classmethod
    def from_points(cls, points):
        """
        Initialise a Region from an array of points.

        Parameters
        ----------
        points : ndarray
            An array of points coordinates and values at a point. The first column - coordinates
            by x axis, the second - coordinates by y axis, the third - values

        Returns
        -------
        region : Region
            A region containing all the given points
        """
        x, y, values = points[:, 0], points[:, 1], points[:, 2]
        return cls(x, y, values)

    def append(self, x, y, value):
        """
        Append one coordinate and its value to the region.

        Parameters
        ----------
        x, y : int
            A coordinate to append
        value : float
            The corresponding value
        """
        self.x = np.concatenate((self.x, [x]))
        self.y = np.concatenate((self.y, [y]))
        self.values = np.concatenate((self.values, [value]))
        self._sync_points()

    def append_point(self, point):
        """
        Append one (x, y, value) point to the region.

        Parameters
        ----------
        point : tuple of (int, int, float)
            A coordinate and the corresponding value to append
        """
        # Delegate to append() so the point matrix is rebuilt in one place.
        self.append(point[0], point[1], point[2])

    def extend(self, x, y, values):
        """
        Extend the region with several coordinates and their values.

        Parameters
        ----------
        x, y : list of int
            Coordinates to extend with
        values : list of float
            Corresponding values
        """
        self.x = np.concatenate((self.x, x))
        self.y = np.concatenate((self.y, y))
        self.values = np.concatenate((self.values, values))
        self._sync_points()

    @property
    def size(self):
        """(width, height) of the axis-aligned rectangle enclosing the region.

        Uses the coordinate arrays directly so the sizes keep their integer
        dtype (the stacked `points` matrix is promoted to float by the values
        column).
        """
        if len(self.x) == 0:
            return 0, 0

        def _span(coords):
            # Inclusive extent along one axis.
            return np.max(coords) - np.min(coords) + 1

        return _span(self.x), _span(self.y)

    def exceeded_size(self, size):
        """
        Check if the rectangle that encloses the region is at least as large as
        the given size in either dimension.

        Parameters
        ----------
        size : tuple
            A (width, height) size to compare with

        Returns
        -------
        flag : bool
            True when the region's width or height reaches the given size
        """
        width, height = self.size
        return width >= size[0] or height >= size[1]
class VisceralSlide(Contour):
    """An object representing visceral slide for a Cine-MRI slice."""

    def __init__(self, patient_id, study_id, slice_id, visceral_slide_data):
        """
        Parameters
        ----------
        patient_id : str
            An id of a patient a Cine-MRI slice belongs to
        study_id : str
            An id of a study a Cine-MRI slice belongs to
        slice_id : str
            An id of a Cine-MRI slice
        visceral_slide_data : dict
            A dictionary containing the coordinates of abdominal cavity contour
            ("x", "y") and visceral slide value at each coordinate ("slide")
        """
        super().__init__(visceral_slide_data["x"], visceral_slide_data["y"])
        self.values = np.array(visceral_slide_data["slide"])
        self.patient_id = patient_id
        self.study_id = study_id
        self.slice_id = slice_id
        # SEPARATOR comes from the config star-import; joins the ids into a
        # unique slice key.
        self.full_id = SEPARATOR.join([patient_id, study_id, slice_id])

    def zeros_fix(self):
        """Replace non-positive values with the smallest positive visceral
        slide value.

        NOTE(review): raises ValueError when no positive values exist
        (np.min of an empty sequence).
        """
        zero_placeholder = np.min([value for value in self.values if value > 0])
        self.values = np.array([value if value > 0 else zero_placeholder for value in self.values])

    def to_regions(self, means=None, stds=None):
        """
        Splits visceral slide into chunks starting from the bottom left point of the contour
        in the clock-wise direction. The provided mean and stds should correspond to these chunks

        Parameters
        ----------
        means : list of float
            A list of visceral slide mean by chunk.
            NOTE(review): despite the default, ``means`` is effectively
            required — ``len(means)`` below raises TypeError when it is None.
        stds : list of float, optional
            A list of visceral slide standard deviation by chunk

        Returns
        -------
        regions : list of Regions
            A list of objects of Region type that represent chunks of visceral slide map
            and include mean and standard deviation of visceral slide value in each chunk
        """
        regions_num = len(means)

        xs, ys = self.x, self.y
        values = self.values

        # Split the contour indices into regions_num near-equal chunks.
        vs_len = len(values)
        indices = np.arange(vs_len)
        chunks = np.array_split(indices, regions_num)
        chunk_lens = np.array([len(chunk) for chunk in chunks])
        # NOTE(review): shuffling the chunk lengths makes the region
        # boundaries nondeterministic between calls; looks like a debug or
        # augmentation leftover — confirm it is intended.
        np.random.shuffle(chunk_lens)

        # Locate the bottom-left contour point; chunking starts there.
        x_bottom_left, y_bottom_left = self.bottom_left_point
        coords = np.column_stack((xs, ys))
        ind = np.where((coords == (x_bottom_left, y_bottom_left)).all(axis=1))[0][0]

        reg_start = ind
        reg_end = reg_start + chunk_lens[0]
        last_ind = reg_start
        regions = []
        for i in range(regions_num):
            # Wrap indices that ran past the end of the (cyclic) contour.
            if reg_end >= vs_len:
                reg_end -= vs_len

            if reg_start >= vs_len:
                reg_start -= vs_len

            mean = means[i] if means is not None else None
            std = stds[i] if stds is not None else None

            # Normal situation, take the connected region
            if reg_start < reg_end:
                x_reg = xs[reg_start:reg_end]
                y_reg = ys[reg_start:reg_end]
                val_reg = values[reg_start:reg_end]
                region = Region(x_reg, y_reg, val_reg, mean, std)
            else:
                # We went beyond the fist contour coordinate, so add up a region from the region start till
                # the end of contour and from the start of contour till the region end
                x_reg = xs[reg_start:]
                y_reg = ys[reg_start:]
                val_reg = values[reg_start:]
                region = Region(x_reg, y_reg, val_reg, mean, std)
                region.extend(xs[:reg_end], ys[:reg_end], values[:reg_end])
            regions.append(region)
            reg_start = reg_end
            if i < regions_num - 1:
                if i == regions_num - 2:
                    # The last region must close the loop back at the start.
                    reg_end = last_ind
                else:
                    reg_end += chunk_lens[i + 1]

        return regions

    def norm_with_expectation(self, means, stds, expectation_norm_type=VSExpectationNormType.mean_div):
        """Normalises visceral slide in place by the provided per-region means
        and standard deviations, assuming they correspond to regions obtained
        with the to_regions method.

        Parameters
        ----------
        means : list of float
            A list of visceral slide mean by chunk
        stds : list of float
            A list of visceral slide standard deviation by chunk
        expectation_norm_type : VSExpectationNormType, default=VSExpectationNormType.mean_div
            A type of normalisation to apply
        """
        vs_regs = self.to_regions(means, stds)

        # Normalise the first region, then concatenate the rest in order.
        reg = vs_regs[0]
        if expectation_norm_type == VSExpectationNormType.mean_div:
            values = reg.values / reg.mean
        else:
            values = (reg.values - reg.mean) / reg.std

        for i in range(1, len(vs_regs)):
            reg = vs_regs[i]
            if expectation_norm_type == VSExpectationNormType.mean_div:
                vs_norm = reg.values / reg.mean
            else:
                vs_norm = (reg.values - reg.mean) / reg.std
            values = np.concatenate((values, vs_norm))

        self.values = values

    def build_path(self, relative_path, extension=".mha"):
        """
        Build a path to this slice's visceral slide file assuming the standard
        patient/study/slice folders hierarchy.

        Parameters
        ----------
        relative_path : Path
            A relative path to locate a slice file
        extension : str, default ".mha"
            File extension appended to the slice id

        Returns
        -------
        path : Path
            A path to the visceral slide file
        """
        return Path(relative_path) / self.patient_id / self.study_id / (self.slice_id + extension)
| 32.730408 | 107 | 0.57552 | import numpy as np
from pathlib import Path
from enum import Enum, unique
from cinemri.contour import Contour
from config import *
@unique
class VSExpectationNormType(Enum):
mean_div = 0
standardize = 1
@unique
class VSTransform(Enum):
none = 0
log = 1
sqrt = 2
class Region:
def __init__(self, x, y, values, mean=None, std=None):
self.x = np.atleast_1d(x)
self.y = np.atleast_1d(y)
self.values = np.atleast_1d(values)
self.mean = mean
self.std = std
self.points = np.column_stack((self.x, self.y, self.values))
@classmethod
def from_point(cls, point):
x, y, value = point[0], point[1], point[2]
region = cls(x, y, value)
return region
@classmethod
def from_points(cls, points):
x, y, values = points[:, 0], points[:, 1], points[:, 2]
region = cls(x, y, values)
return region
def append(self, x, y, value):
self.x = np.concatenate((self.x, [x]))
self.y = np.concatenate((self.y, [y]))
self.values = np.concatenate((self.values, [value]))
self.points = np.column_stack((self.x, self.y, self.values))
def append_point(self, point):
self.x = np.concatenate((self.x, [point[0]]))
self.y = np.concatenate((self.y, [point[1]]))
self.values = np.concatenate((self.values, [point[2]]))
self.points = np.column_stack((self.x, self.y, self.values))
def extend(self, x, y, values):
self.x = np.concatenate((self.x, x))
self.y = np.concatenate((self.y, y))
self.values = np.concatenate((self.values, values))
self.points = np.column_stack((self.x, self.y, self.values))
@property
def size(self):
if len(self.x) == 0:
return 0, 0
def compute_len(axis=0):
values = [point[axis] for point in self.points]
length = np.max(values) - np.min(values) + 1
return length
width = compute_len(axis=0)
height = compute_len(axis=1)
return width, height
def exceeded_size(self, size):
width, height = self.size
return width >= size[0] or height >= size[1]
class VisceralSlide(Contour):
def __init__(self, patient_id, study_id, slice_id, visceral_slide_data):
super().__init__(visceral_slide_data["x"], visceral_slide_data["y"])
self.values = np.array(visceral_slide_data["slide"])
self.patient_id = patient_id
self.study_id = study_id
self.slice_id = slice_id
self.full_id = SEPARATOR.join([patient_id, study_id, slice_id])
def zeros_fix(self):
zero_placeholder = np.min([value for value in self.values if value > 0])
self.values = np.array([value if value > 0 else zero_placeholder for value in self.values])
def to_regions(self, means=None, stds=None):
regions_num = len(means)
xs, ys = self.x, self.y
values = self.values
vs_len = len(values)
indices = np.arange(vs_len)
chunks = np.array_split(indices, regions_num)
chunk_lens = np.array([len(chunk) for chunk in chunks])
np.random.shuffle(chunk_lens)
x_bottom_left, y_bottom_left = self.bottom_left_point
coords = np.column_stack((xs, ys))
ind = np.where((coords == (x_bottom_left, y_bottom_left)).all(axis=1))[0][0]
reg_start = ind
reg_end = reg_start + chunk_lens[0]
last_ind = reg_start
regions = []
for i in range(regions_num):
if reg_end >= vs_len:
reg_end -= vs_len
if reg_start >= vs_len:
reg_start -= vs_len
mean = means[i] if means is not None else None
std = stds[i] if stds is not None else None
if reg_start < reg_end:
x_reg = xs[reg_start:reg_end]
y_reg = ys[reg_start:reg_end]
val_reg = values[reg_start:reg_end]
region = Region(x_reg, y_reg, val_reg, mean, std)
else:
x_reg = xs[reg_start:]
y_reg = ys[reg_start:]
val_reg = values[reg_start:]
region = Region(x_reg, y_reg, val_reg, mean, std)
region.extend(xs[:reg_end], ys[:reg_end], values[:reg_end])
regions.append(region)
reg_start = reg_end
if i < regions_num - 1:
if i == regions_num - 2:
reg_end = last_ind
else:
reg_end += chunk_lens[i + 1]
return regions
def norm_with_expectation(self, means, stds, expectation_norm_type=VSExpectationNormType.mean_div):
vs_regs = self.to_regions(means, stds)
reg = vs_regs[0]
if expectation_norm_type == VSExpectationNormType.mean_div:
values = reg.values / reg.mean
else:
values = (reg.values - reg.mean) / reg.std
for i in range(1, len(vs_regs)):
reg = vs_regs[i]
if expectation_norm_type == VSExpectationNormType.mean_div:
vs_norm = reg.values / reg.mean
else:
vs_norm = (reg.values - reg.mean) / reg.std
values = np.concatenate((values, vs_norm))
self.values = values
def build_path(self, relative_path, extension=".mha"):
return Path(relative_path) / self.patient_id / self.study_id / (self.slice_id + extension)
| true | true |
f7fc68b41ca221c80c0798f92eb11cddfffd90e7 | 23,561 | py | Python | src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_compile.py | traviscrawford/pants | 587df52d68e612d2f2c87dbe1f504a7bc69b2c46 | [
"Apache-2.0"
] | 1 | 2016-12-27T08:46:10.000Z | 2016-12-27T08:46:10.000Z | src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_compile.py | traviscrawford/pants | 587df52d68e612d2f2c87dbe1f504a7bc69b2c46 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_compile.py | traviscrawford/pants | 587df52d68e612d2f2c87dbe1f504a7bc69b2c46 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import re
import textwrap
from contextlib import closing
from hashlib import sha1
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_open
from pants.util.memo import memoized_method, memoized_property
# Well known metadata file required to register scalac plugins with nsc.
_SCALAC_PLUGIN_INFO_FILE = 'scalac-plugin.xml'

# Well known metadata file to register javac plugins (JDK Plugin service loader).
_JAVAC_PLUGIN_INFO_FILE = 'META-INF/services/com.sun.source.util.Plugin'

# Well known metadata file to register annotation processors with a java 1.6+ compiler.
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'

# Module-level logger for this compile task.
logger = logging.getLogger(__name__)
class BaseZincCompile(JvmCompile):
"""An abstract base class for zinc compilation tasks."""
_name = 'zinc'
  @staticmethod
  def _write_scalac_plugin_info(resources_dir, scalac_plugin_target):
    """Write the scalac-plugin.xml descriptor that registers the plugin with nsc.

    :param resources_dir: Directory (typically the target's classes dir) to write into.
    :param scalac_plugin_target: Target providing the `plugin` name and `classname`.
    """
    scalac_plugin_info_file = os.path.join(resources_dir, _SCALAC_PLUGIN_INFO_FILE)
    with safe_open(scalac_plugin_info_file, 'w') as f:
      # dedent + strip keeps the on-disk XML free of the indentation and
      # surrounding blank lines introduced by this source literal.
      f.write(textwrap.dedent("""
        <plugin>
          <name>{}</name>
          <classname>{}</classname>
        </plugin>
      """.format(scalac_plugin_target.plugin, scalac_plugin_target.classname)).strip())
@staticmethod
def _write_javac_plugin_info(resources_dir, javac_plugin_target):
javac_plugin_info_file = os.path.join(resources_dir, _JAVAC_PLUGIN_INFO_FILE)
with safe_open(javac_plugin_info_file, 'w') as f:
f.write(javac_plugin_target.classname)
@staticmethod
def validate_arguments(log, whitelisted_args, args):
"""Validate that all arguments match whitelisted regexes."""
valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}
def validate(idx):
arg = args[idx]
for pattern, has_argument in valid_patterns.items():
if pattern.match(arg):
return 2 if has_argument else 1
log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
return 1
arg_index = 0
while arg_index < len(args):
arg_index += validate(arg_index)
  @staticmethod
  def _get_zinc_arguments(settings):
    """Extracts and formats the zinc arguments given in the jvm platform settings.

    This is responsible for the symbol substitution which replaces $JAVA_HOME with the path to an
    appropriate jvm distribution.

    :param settings: The jvm platform settings from which to extract the arguments.
    :type settings: :class:`JvmPlatformSettings`
    :returns: A list of zinc argument strings.
    """
    zinc_args = [
      '-C-source', '-C{}'.format(settings.source_level),
      '-C-target', '-C{}'.format(settings.target_level),
    ]
    if settings.args:
      settings_args = settings.args
      if any('$JAVA_HOME' in a for a in settings.args):
        # Prefer a strictly matching distribution; fall back to a non-strict
        # lookup when none is available.
        try:
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=True)
        except DistributionLocator.Error:
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=False)
        logger.debug('Substituting "$JAVA_HOME" with "{}" in jvm-platform args.'
                     .format(distribution.home))
        # A lazy generator is fine here: it is consumed by extend() below.
        settings_args = (a.replace('$JAVA_HOME', distribution.home) for a in settings.args)
      zinc_args.extend(settings_args)
    return zinc_args
@classmethod
def implementation_version(cls):
return super(BaseZincCompile, cls).implementation_version() + [('BaseZincCompile', 7)]
  @classmethod
  def compiler_plugin_types(cls):
    """A tuple of target types which are compiler plugins."""
    return (AnnotationProcessor, JavacPlugin, ScalacPlugin)

  @classmethod
  def get_jvm_options_default(cls, bootstrap_option_values):
    """Default JVM options for the zinc process itself."""
    return ('-Dfile.encoding=UTF-8', '-Dzinc.analysis.cache.limit=1000',
            '-Djava.awt.headless=true', '-Xmx2g')

  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    """Default compile args (the -C/-S prefixes appear to route options to
    javac/scalac respectively — confirm against zinc's pass-through rules)."""
    return ('-C-encoding', '-CUTF-8', '-S-encoding', '-SUTF-8', '-S-g:vars')

  @classmethod
  def get_warning_args_default(cls):
    """Args enabling compiler warnings."""
    return ('-C-deprecation', '-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path',
            '-S-deprecation', '-S-unchecked', '-S-Xlint')

  @classmethod
  def get_no_warning_args_default(cls):
    """Args suppressing compiler warnings."""
    return ('-C-nowarn', '-C-Xlint:none', '-S-nowarn', '-S-Xlint:none', )

  @classmethod
  def get_fatal_warnings_enabled_args_default(cls):
    """Args that turn warnings into errors."""
    return ('-S-Xfatal-warnings', '-C-Werror')

  @classmethod
  def get_fatal_warnings_disabled_args_default(cls):
    """No extra args are needed to leave warnings non-fatal."""
    return ()
  @classmethod
  def register_options(cls, register):
    """Register zinc-specific options on top of the inherited JvmCompile ones."""
    super(BaseZincCompile, cls).register_options(register)
    # TODO: Sort out JVM compile config model: https://github.com/pantsbuild/pants/issues/4483.
    register('--whitelisted-args', advanced=True, type=dict,
             default={
               '-S.*': False,
               '-C.*': False,
               '-file-filter': True,
               '-msg-filter': True,
               },
             help='A dict of option regexes that make up pants\' supported API for zinc. '
                  'Options not listed here are subject to change/removal. The value of the dict '
                  'indicates that an option accepts an argument.')

    register('--incremental', advanced=True, type=bool, default=True,
             help='When set, zinc will use sub-target incremental compilation, which dramatically '
                  'improves compile performance while changing large targets. When unset, '
                  'changed targets will be compiled with an empty output directory, as if after '
                  'running clean-all.')

    register('--incremental-caching', advanced=True, type=bool,
             help='When set, the results of incremental compiles will be written to the cache. '
                  'This is unset by default, because it is generally a good precaution to cache '
                  'only clean/cold builds.')
@classmethod
def subsystem_dependencies(cls):
return super(BaseZincCompile, cls).subsystem_dependencies() + (Zinc.Factory,)
  @classmethod
  def prepare(cls, options, round_manager):
    """Declare product requirements; additionally requests the scala compiler tools."""
    super(BaseZincCompile, cls).prepare(options, round_manager)
    ScalaPlatform.prepare_tools(round_manager)
  @property
  def incremental(self):
    """Zinc implements incremental compilation.

    Setting this property causes the task infrastructure to clone the previous
    results_dir for a target into the new results_dir for a target.
    """
    return self.get_options().incremental

  @property
  def cache_incremental(self):
    """Optionally write the results of incremental compiles to the cache."""
    return self.get_options().incremental_caching

  @memoized_property
  def _zinc(self):
    # Lazily-created (and cached, via @memoized_property) Zinc instance.
    return Zinc.Factory.global_instance().create(self.context.products)
  def __init__(self, *args, **kwargs):
    """Initialise the task: require a full JDK and validate zinc args up front."""
    super(BaseZincCompile, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)
    # A directory to contain per-target subdirectories with apt processor info files.
    self._processor_info_dir = os.path.join(self.workdir, 'apt-processor-info')

    # Validate zinc options.
    # NOTE(review): calls ZincCompile (presumably the concrete subclass defined
    # later in this file) rather than cls; the staticmethod is shared, so the
    # effect should be the same — confirm.
    ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args,
                                   self._args)
  def select(self, target):
    """Abstract: subclasses decide which targets this compile task handles."""
    raise NotImplementedError()

  def select_source(self, source_file_path):
    """Abstract: subclasses decide which source files this compile task handles."""
    raise NotImplementedError()
  def create_analysis_tools(self):
    """Build the AnalysisTools used to parse/relativize zinc analysis files."""
    return AnalysisTools(self.dist.real_home, ZincAnalysisParser(), ZincAnalysis,
                         get_buildroot(), self.get_options().pants_workdir)

  def javac_classpath(self):
    """Classpath of the configured javac tool, if any."""
    # Note that if this classpath is empty then Zinc will automatically use the javac from
    # the JDK it was invoked with.
    return Java.global_javac_classpath(self.context.products)

  def scalac_classpath(self):
    """Classpath of the configured scala compiler."""
    return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
def write_extra_resources(self, compile_context):
    """Override write_extra_resources to produce plugin and annotation processor files."""
    target = compile_context.target
    if isinstance(target, ScalacPlugin):
        # scalac plugins advertise themselves via a scalac-plugin.xml descriptor.
        self._write_scalac_plugin_info(compile_context.classes_dir, target)
    elif isinstance(target, JavacPlugin):
        # javac plugins are discovered through the ServiceLoader mechanism.
        self._write_javac_plugin_info(compile_context.classes_dir, target)
    elif isinstance(target, AnnotationProcessor) and target.processors:
        # Annotation processors list their implementation classes in META-INF/services.
        processor_info_file = os.path.join(compile_context.classes_dir, _PROCESSOR_INFO_FILE)
        self._write_processor_info(processor_info_file, target.processors)
def _write_processor_info(self, processor_info_file, processors):
    """Write one stripped processor class name per line to the services file."""
    with safe_open(processor_info_file, 'w') as f:
        f.writelines('{}\n'.format(processor.strip()) for processor in processors)
@memoized_property
def _zinc_cache_dir(self):
    """A directory where zinc can store compiled copies of the `compiler-bridge`.

    The compiler-bridge is specific to each scala version, and is lazily computed by zinc if the
    appropriate version does not exist. Eventually it would be great to just fetch this rather
    than compiling it.
    """
    # Key the cache dir on the workdir-relative paths of the zinc jars plus the
    # interface/bridge jars, so changing the toolchain yields a fresh dir.
    hasher = sha1()
    for jar_path in self._zinc.zinc + [self._zinc.compiler_interface, self._zinc.compiler_bridge]:
        # NOTE(review): hashing a str works on Python 2; on Python 3 this would
        # need an explicit .encode() — confirm the target interpreter.
        hasher.update(os.path.relpath(jar_path, self.get_options().pants_workdir))
    key = hasher.hexdigest()[:12]
    return os.path.join(self.get_options().pants_bootstrapdir, 'zinc', key)
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
            log_file, zinc_args_file, settings, fatal_warnings, zinc_file_manager,
            javac_plugin_map, scalac_plugin_map):
    """Build the full zinc command line, record it, and invoke zinc.

    The argument list is mirrored to `zinc_args_file` (one arg per line) for
    debugging/reproduction. Raises TaskError on a malformed classpath or a
    failed compile.
    """
    self._verify_zinc_classpath(classpath)
    self._verify_zinc_classpath(upstream_analysis.keys())

    zinc_args = []
    zinc_args.extend([
        '-log-level', self.get_options().level,
        '-analysis-cache', analysis_file,
        '-classpath', ':'.join(classpath),
        '-d', classes_output_dir
    ])
    if not self.get_options().colors:
        zinc_args.append('-no-color')
    if log_file:
        zinc_args.extend(['-capture-log', log_file])

    zinc_args.extend(['-compiler-interface', self._zinc.compiler_interface])
    zinc_args.extend(['-compiler-bridge', self._zinc.compiler_bridge])
    zinc_args.extend(['-zinc-cache-dir', self._zinc_cache_dir])
    zinc_args.extend(['-scala-path', ':'.join(self.scalac_classpath())])

    zinc_args.extend(self._javac_plugin_args(javac_plugin_map))
    # Search for scalac plugins on the classpath.
    # Note that:
    # - We also search in the extra scalac plugin dependencies, if specified.
    # - In scala 2.11 and up, the plugin's classpath element can be a dir, but for 2.10 it must be
    #   a jar. So in-repo plugins will only work with 2.10 if --use-classpath-jars is true.
    # - We exclude our own classes_output_dir, because if we're a plugin ourselves, then our
    #   classes_output_dir doesn't have scalac-plugin.xml yet, and we don't want that fact to get
    #   memoized (which in practice will only happen if this plugin uses some other plugin, thus
    #   triggering the plugin search mechanism, which does the memoizing).
    scalac_plugin_search_classpath = (
        (set(classpath) | set(self.scalac_plugin_classpath_elements())) -
        {classes_output_dir}
    )
    zinc_args.extend(self._scalac_plugin_args(scalac_plugin_map, scalac_plugin_search_classpath))
    if upstream_analysis:
        zinc_args.extend(['-analysis-map',
                          ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])

    zinc_args.extend(args)
    zinc_args.extend(self._get_zinc_arguments(settings))
    zinc_args.append('-transactional')

    if fatal_warnings:
        zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
    else:
        zinc_args.extend(self.get_options().fatal_warnings_disabled_args)

    if not zinc_file_manager:
        zinc_args.append('-no-zinc-file-manager')

    jvm_options = []

    if self.javac_classpath():
        # Make the custom javac classpath the first thing on the bootclasspath, to ensure that
        # it's the one javax.tools.ToolProvider.getSystemJavaCompiler() loads.
        # It will probably be loaded even on the regular classpath: If not found on the bootclasspath,
        # getSystemJavaCompiler() constructs a classloader that loads from the JDK's tools.jar.
        # That classloader will first delegate to its parent classloader, which will search the
        # regular classpath. However it's harder to guarantee that our javac will preceed any others
        # on the classpath, so it's safer to prefix it to the bootclasspath.
        jvm_options.extend(['-Xbootclasspath/p:{}'.format(':'.join(self.javac_classpath()))])

    jvm_options.extend(self._jvm_options)

    zinc_args.extend(sources)

    self.log_zinc_file(analysis_file)
    with open(zinc_args_file, 'w') as fp:
        for arg in zinc_args:
            # Fix: zinc_args_file is opened in text mode, so the original pair
            # of writes (`fp.write(arg)` then `fp.write(b'\n')`) mixed str and
            # bytes — a TypeError on Python 3, and only worked on Python 2 via
            # str/bytes aliasing. Write a single text line per argument.
            fp.write('{}\n'.format(arg))

    if self.runjava(classpath=self._zinc.zinc,
                    main=Zinc.ZINC_COMPILE_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name=self.name(),
                    workunit_labels=[WorkUnitLabel.COMPILER]):
        raise TaskError('Zinc compile failed.')
def _verify_zinc_classpath(self, classpath):
def is_outside(path, putative_parent):
return os.path.relpath(path, putative_parent).startswith(os.pardir)
for path in classpath:
if not os.path.isabs(path):
raise TaskError('Classpath entries provided to zinc should be absolute. '
'{} is not.'.format(path))
if is_outside(path, self.get_options().pants_workdir) and is_outside(path, self.dist.home):
raise TaskError('Classpath entries provided to zinc should be in working directory or '
'part of the JDK. {} is not.'.format(path))
if path != os.path.normpath(path):
raise TaskError('Classpath entries provided to zinc should be normalized '
'(i.e. without ".." and "."). {} is not.'.format(path))
def log_zinc_file(self, analysis_file):
    # Debug-log the analysis file path plus a fingerprint of its current
    # contents (or 'nonexistent' on a first, cold compile).
    self.context.log.debug('Calling zinc on: {} ({})'
                           .format(analysis_file,
                                   hash_file(analysis_file).upper()
                                   if os.path.exists(analysis_file)
                                   else 'nonexistent'))
@classmethod
def _javac_plugin_args(cls, javac_plugin_map):
ret = []
for plugin, args in javac_plugin_map.items():
for arg in args:
if ' ' in arg:
# Note: Args are separated by spaces, and there is no way to escape embedded spaces, as
# javac's Main does a simple split on these strings.
raise TaskError('javac plugin args must not contain spaces '
'(arg {} for plugin {})'.format(arg, plugin))
ret.append('-C-Xplugin:{} {}'.format(plugin, ' '.join(args)))
return ret
def _scalac_plugin_args(self, scalac_plugin_map, classpath):
if not scalac_plugin_map:
return []
plugin_jar_map = self._find_scalac_plugins(scalac_plugin_map.keys(), classpath)
ret = []
for name, cp_entries in plugin_jar_map.items():
# Note that the first element in cp_entries is the one containing the plugin's metadata,
# meaning that this is the plugin that will be loaded, even if there happen to be other
# plugins in the list of entries (e.g., because this plugin depends on another plugin).
ret.append('-S-Xplugin:{}'.format(':'.join(cp_entries)))
for arg in scalac_plugin_map[name]:
ret.append('-S-P:{}:{}'.format(name, arg))
return ret
def _find_scalac_plugins(self, scalac_plugins, classpath):
    """Returns a map from plugin name to list of plugin classpath entries.

    The first entry in each list is the classpath entry containing the plugin metadata.
    The rest are the internal transitive deps of the plugin.
    This allows us to have in-repo plugins with dependencies (unlike javac, scalac doesn't load
    plugins or their deps from the regular classpath, so we have to provide these entries
    separately, in the -Xplugin: flag).
    Note that we don't currently support external plugins with dependencies, as we can't know which
    external classpath elements are required, and we'd have to put the entire external classpath
    on each -Xplugin: flag, which seems excessive.
    Instead, external plugins should be published as "fat jars" (which appears to be the norm,
    since SBT doesn't support plugins with dependencies anyway).

    Raises TaskError on conflicting plugin definitions or unresolved plugins.
    """
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in scalac_plugins for p in val.split(',')])
    if not plugin_names:
        return {}

    active_plugins = {}
    buildroot = get_buildroot()

    cp_product = self.context.products.get_data('runtime_classpath')
    for classpath_element in classpath:
        name = self._maybe_get_plugin_name(classpath_element)
        if name in plugin_names:
            plugin_target_closure = self._plugin_targets('scalac').get(name, [])
            # It's important to use relative paths, as the compiler flags get embedded in the zinc
            # analysis file, and we port those between systems via the artifact cache.
            rel_classpath_elements = [
                os.path.relpath(cpe, buildroot) for cpe in
                ClasspathUtil.internal_classpath(plugin_target_closure, cp_product, self._confs)]
            # If the plugin is external then rel_classpath_elements will be empty, so we take
            # just the external jar itself.
            rel_classpath_elements = rel_classpath_elements or [classpath_element]
            # Some classpath elements may be repeated, so we allow for that here;
            # a *different* entry set for the same name is a conflict.
            if active_plugins.get(name, rel_classpath_elements) != rel_classpath_elements:
                raise TaskError('Plugin {} defined in {} and in {}'.format(name, active_plugins[name],
                                                                           classpath_element))
            active_plugins[name] = rel_classpath_elements
            if len(active_plugins) == len(plugin_names):
                # We've found all the plugins, so return now to spare us from processing
                # of the rest of the classpath for no reason.
                return active_plugins

    # If we get here we must have unresolved plugins.
    unresolved_plugins = plugin_names - set(active_plugins.keys())
    raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))
@classmethod
@memoized_method
def _maybe_get_plugin_name(cls, classpath_element):
    """If classpath_element is a scalac plugin, returns its name.

    Returns None otherwise. Handles both exploded directories and jars.
    """
    def process_info_file(cp_elem, info_file):
        # Parse scalac-plugin.xml and extract the plugin <name>.
        plugin_info = ElementTree.parse(info_file).getroot()
        if plugin_info.tag != 'plugin':
            raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
                _SCALAC_PLUGIN_INFO_FILE, cp_elem))
        return plugin_info.find('name').text

    if os.path.isdir(classpath_element):
        try:
            with open(os.path.join(classpath_element, _SCALAC_PLUGIN_INFO_FILE)) as plugin_info_file:
                return process_info_file(classpath_element, plugin_info_file)
        except IOError as e:
            # A missing descriptor just means "not a plugin"; anything else is a real error.
            if e.errno != errno.ENOENT:
                raise
    else:
        with open_zip(classpath_element, 'r') as jarfile:
            try:
                with closing(jarfile.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
                    return process_info_file(classpath_element, plugin_info_file)
            except KeyError:
                # Jar has no descriptor entry: not a plugin.
                pass
    return None
class ZincCompile(BaseZincCompile):
    """Compile Scala and Java code to classfiles using Zinc."""

    @classmethod
    def register_options(cls, register):
        super(ZincCompile, cls).register_options(register)
        # javac plugins: names, per-plugin args, and an extra search classpath.
        register('--javac-plugins', advanced=True, type=list, fingerprint=True,
                 help='Use these javac plugins.')
        register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
                 help='Map from javac plugin name to list of arguments for that plugin.')
        cls.register_jvm_tool(register, 'javac-plugin-dep', classpath=[],
                              help='Search for javac plugins here, as well as in any '
                                   'explicit dependencies.')
        # scalac plugins: mirror of the javac options above.
        register('--scalac-plugins', advanced=True, type=list, fingerprint=True,
                 help='Use these scalac plugins.')
        register('--scalac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
                 help='Map from scalac plugin name to list of arguments for that plugin.')
        cls.register_jvm_tool(register, 'scalac-plugin-dep', classpath=[],
                              help='Search for scalac plugins here, as well as in any '
                                   'explicit dependencies.')

    @classmethod
    def product_types(cls):
        # Products that downstream tasks may require from this one.
        return ['runtime_classpath', 'classes_by_source', 'product_deps_by_src', 'zinc_args']

    @memoized_method
    def extra_compile_time_classpath_elements(self):
        # javac plugins are loaded from the regular class entries containing javac plugins,
        # so we can provide them here.
        # Note that, unlike javac, scalac plugins are not loaded from the regular classpath,
        # so we don't provide them here.
        return self.tool_classpath('javac-plugin-dep')

    @memoized_method
    def scalac_plugin_classpath_elements(self):
        """Classpath entries containing scalac plugins."""
        return self.tool_classpath('scalac-plugin-dep')

    def select(self, target):
        # Require that targets are marked for JVM compilation, to differentiate from
        # targets owned by the scalajs contrib module.
        if not isinstance(target, JvmTarget):
            return False
        return target.has_sources('.java') or target.has_sources('.scala')

    def select_source(self, source_file_path):
        """True for .java and .scala source files."""
        return source_file_path.endswith('.java') or source_file_path.endswith('.scala')
| 44.370998 | 100 | 0.70222 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import re
import textwrap
from contextlib import closing
from hashlib import sha1
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_open
from pants.util.memo import memoized_method, memoized_property
_SCALAC_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
_JAVAC_PLUGIN_INFO_FILE = 'META-INF/services/com.sun.source.util.Plugin'
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'
logger = logging.getLogger(__name__)
class BaseZincCompile(JvmCompile):
_name = 'zinc'
@staticmethod
def _write_scalac_plugin_info(resources_dir, scalac_plugin_target):
    # Emit the scalac-plugin.xml descriptor scalac uses to discover the
    # plugin's name and entry-point class when the jar/dir is on -Xplugin.
    scalac_plugin_info_file = os.path.join(resources_dir, _SCALAC_PLUGIN_INFO_FILE)
    with safe_open(scalac_plugin_info_file, 'w') as f:
        # dedent + strip normalize the template, so only the relative
        # indentation inside the literal matters.
        f.write(textwrap.dedent("""
          <plugin>
            <name>{}</name>
            <classname>{}</classname>
          </plugin>
        """.format(scalac_plugin_target.plugin, scalac_plugin_target.classname)).strip())
@staticmethod
def _write_javac_plugin_info(resources_dir, javac_plugin_target):
    # javac plugins are discovered via the standard ServiceLoader mechanism:
    # a META-INF/services file naming the plugin implementation class.
    javac_plugin_info_file = os.path.join(resources_dir, _JAVAC_PLUGIN_INFO_FILE)
    with safe_open(javac_plugin_info_file, 'w') as f:
        f.write(javac_plugin_target.classname)
@staticmethod
def validate_arguments(log, whitelisted_args, args):
    """Warn about zinc passthrough args that are not on the whitelist.

    `whitelisted_args` maps an option regex to a bool saying whether the
    option consumes a following value argument.
    """
    valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}

    def validate(idx):
        # Returns how many arg slots `args[idx]` occupies: 2 when a
        # whitelisted option takes a value, otherwise 1.
        arg = args[idx]
        for pattern, has_argument in valid_patterns.items():
            if pattern.match(arg):
                return 2 if has_argument else 1
        log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
        return 1

    # Walk the arg list, skipping over the values of options that take one.
    arg_index = 0
    while arg_index < len(args):
        arg_index += validate(arg_index)
@staticmethod
def _get_zinc_arguments(settings):
    """Translate a JVM platform `settings` object into zinc -C args.

    Substitutes "$JAVA_HOME" in platform args with the home of the preferred
    JVM distribution, falling back to a non-strict lookup when a strict match
    is unavailable.
    """
    zinc_args = [
        '-C-source', '-C{}'.format(settings.source_level),
        '-C-target', '-C{}'.format(settings.target_level),
    ]
    if settings.args:
        settings_args = settings.args
        if any('$JAVA_HOME' in a for a in settings.args):
            try:
                distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=True)
            except DistributionLocator.Error:
                distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=False)
            logger.debug('Substituting "$JAVA_HOME" with "{}" in jvm-platform args.'
                         .format(distribution.home))
            # Lazy generator; consumed immediately by the extend below.
            settings_args = (a.replace('$JAVA_HOME', distribution.home) for a in settings.args)
        zinc_args.extend(settings_args)
    return zinc_args
@classmethod
def implementation_version(cls):
    # Bump the trailing number to invalidate cached artifacts whenever the
    # implementation of this task changes.
    return super(BaseZincCompile, cls).implementation_version() + [('BaseZincCompile', 7)]
@classmethod
def compiler_plugin_types(cls):
    """Target types this task treats as compiler plugins."""
    return (AnnotationProcessor, JavacPlugin, ScalacPlugin)
@classmethod
def get_jvm_options_default(cls, bootstrap_option_values):
    # Defaults for the zinc JVM: UTF-8 I/O, capped analysis cache, headless
    # mode, and a 2g heap.
    return ('-Dfile.encoding=UTF-8', '-Dzinc.analysis.cache.limit=1000',
            '-Djava.awt.headless=true', '-Xmx2g')
@classmethod
def get_args_default(cls, bootstrap_option_values):
    """Default zinc passthrough args: UTF-8 encoding for both javac (-C) and
    scalac (-S), plus scalac local-variable debug info."""
    defaults = ('-C-encoding', '-CUTF-8', '-S-encoding', '-SUTF-8', '-S-g:vars')
    return defaults
@classmethod
def get_warning_args_default(cls):
    # Enable a broad lint set, minus the noisy serial/path checks for javac.
    return ('-C-deprecation', '-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path',
            '-S-deprecation', '-S-unchecked', '-S-Xlint')
@classmethod
def get_no_warning_args_default(cls):
    # Silence all javac and scalac warnings.
    return ('-C-nowarn', '-C-Xlint:none', '-S-nowarn', '-S-Xlint:none', )
@classmethod
def get_fatal_warnings_enabled_args_default(cls):
    """Args appended when fatal warnings are on: warnings become errors for
    both scalac and javac."""
    fatal_flags = ('-S-Xfatal-warnings', '-C-Werror')
    return fatal_flags
@classmethod
def get_fatal_warnings_disabled_args_default(cls):
    # No extra flags are needed to leave warnings non-fatal.
    return ()
@classmethod
def register_options(cls, register):
    super(BaseZincCompile, cls).register_options(register)
    # Whitelist of zinc CLI options pants officially supports; the dict value
    # says whether the option consumes a following argument.
    register('--whitelisted-args', advanced=True, type=dict,
             default={
                 '-S.*': False,
                 '-C.*': False,
                 '-file-filter': True,
                 '-msg-filter': True,
             },
             help='A dict of option regexes that make up pants\' supported API for zinc. '
                  'Options not listed here are subject to change/removal. The value of the dict '
                  'indicates that an option accepts an argument.')
    register('--incremental', advanced=True, type=bool, default=True,
             help='When set, zinc will use sub-target incremental compilation, which dramatically '
                  'improves compile performance while changing large targets. When unset, '
                  'changed targets will be compiled with an empty output directory, as if after '
                  'running clean-all.')
    register('--incremental-caching', advanced=True, type=bool,
             help='When set, the results of incremental compiles will be written to the cache. '
                  'This is unset by default, because it is generally a good precaution to cache '
                  'only clean/cold builds.')
@classmethod
def subsystem_dependencies(cls):
return super(BaseZincCompile, cls).subsystem_dependencies() + (Zinc.Factory,)
@classmethod
def prepare(cls, options, round_manager):
super(BaseZincCompile, cls).prepare(options, round_manager)
ScalaPlatform.prepare_tools(round_manager)
@property
def incremental(self):
return self.get_options().incremental
@property
def cache_incremental(self):
return self.get_options().incremental_caching
@memoized_property
def _zinc(self):
return Zinc.Factory.global_instance().create(self.context.products)
def __init__(self, *args, **kwargs):
super(BaseZincCompile, self).__init__(*args, **kwargs)
self.set_distribution(jdk=True)
# A directory to contain per-target subdirectories with apt processor info files.
self._processor_info_dir = os.path.join(self.workdir, 'apt-processor-info')
# Validate zinc options.
ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args,
self._args)
def select(self, target):
raise NotImplementedError()
def select_source(self, source_file_path):
raise NotImplementedError()
def create_analysis_tools(self):
return AnalysisTools(self.dist.real_home, ZincAnalysisParser(), ZincAnalysis,
get_buildroot(), self.get_options().pants_workdir)
def javac_classpath(self):
# Note that if this classpath is empty then Zinc will automatically use the javac from
# the JDK it was invoked with.
return Java.global_javac_classpath(self.context.products)
def scalac_classpath(self):
return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
def write_extra_resources(self, compile_context):
target = compile_context.target
if isinstance(target, ScalacPlugin):
self._write_scalac_plugin_info(compile_context.classes_dir, target)
elif isinstance(target, JavacPlugin):
self._write_javac_plugin_info(compile_context.classes_dir, target)
elif isinstance(target, AnnotationProcessor) and target.processors:
processor_info_file = os.path.join(compile_context.classes_dir, _PROCESSOR_INFO_FILE)
self._write_processor_info(processor_info_file, target.processors)
def _write_processor_info(self, processor_info_file, processors):
with safe_open(processor_info_file, 'w') as f:
for processor in processors:
f.write('{}\n'.format(processor.strip()))
@memoized_property
def _zinc_cache_dir(self):
hasher = sha1()
for jar_path in self._zinc.zinc + [self._zinc.compiler_interface, self._zinc.compiler_bridge]:
hasher.update(os.path.relpath(jar_path, self.get_options().pants_workdir))
key = hasher.hexdigest()[:12]
return os.path.join(self.get_options().pants_bootstrapdir, 'zinc', key)
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
            log_file, zinc_args_file, settings, fatal_warnings, zinc_file_manager,
            javac_plugin_map, scalac_plugin_map):
    """Build the full zinc command line, record it, and invoke zinc.

    The argument list is mirrored to `zinc_args_file` (one arg per line) for
    debugging/reproduction. Raises TaskError on a malformed classpath or a
    failed compile.
    """
    self._verify_zinc_classpath(classpath)
    self._verify_zinc_classpath(upstream_analysis.keys())

    zinc_args = []
    zinc_args.extend([
        '-log-level', self.get_options().level,
        '-analysis-cache', analysis_file,
        '-classpath', ':'.join(classpath),
        '-d', classes_output_dir
    ])
    if not self.get_options().colors:
        zinc_args.append('-no-color')
    if log_file:
        zinc_args.extend(['-capture-log', log_file])

    zinc_args.extend(['-compiler-interface', self._zinc.compiler_interface])
    zinc_args.extend(['-compiler-bridge', self._zinc.compiler_bridge])
    zinc_args.extend(['-zinc-cache-dir', self._zinc_cache_dir])
    zinc_args.extend(['-scala-path', ':'.join(self.scalac_classpath())])

    zinc_args.extend(self._javac_plugin_args(javac_plugin_map))
    # Search for scalac plugins on the classpath, including any extra scalac
    # plugin dependency entries. We exclude our own classes_output_dir: if
    # we're a plugin ourselves, it doesn't contain scalac-plugin.xml yet, and
    # we don't want that fact memoized by the plugin search mechanism.
    scalac_plugin_search_classpath = (
        (set(classpath) | set(self.scalac_plugin_classpath_elements())) -
        {classes_output_dir}
    )
    zinc_args.extend(self._scalac_plugin_args(scalac_plugin_map, scalac_plugin_search_classpath))
    if upstream_analysis:
        zinc_args.extend(['-analysis-map',
                          ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])

    zinc_args.extend(args)
    zinc_args.extend(self._get_zinc_arguments(settings))
    zinc_args.append('-transactional')

    if fatal_warnings:
        zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
    else:
        zinc_args.extend(self.get_options().fatal_warnings_disabled_args)

    if not zinc_file_manager:
        zinc_args.append('-no-zinc-file-manager')

    jvm_options = []

    if self.javac_classpath():
        # Make the custom javac classpath the first thing on the bootclasspath, to ensure that
        # it's the one javax.tools.ToolProvider.getSystemJavaCompiler() loads.
        # That classloader will first delegate to its parent classloader, which will search the
        # regular classpath. However it's harder to guarantee that our javac will preceed any others
        # on the classpath, so it's safer to prefix it to the bootclasspath.
        jvm_options.extend(['-Xbootclasspath/p:{}'.format(':'.join(self.javac_classpath()))])

    jvm_options.extend(self._jvm_options)

    zinc_args.extend(sources)

    self.log_zinc_file(analysis_file)
    with open(zinc_args_file, 'w') as fp:
        for arg in zinc_args:
            # Fix: zinc_args_file is opened in text mode, so the original pair
            # of writes (`fp.write(arg)` then `fp.write(b'\n')`) mixed str and
            # bytes — a TypeError on Python 3, and only worked on Python 2 via
            # str/bytes aliasing. Write a single text line per argument.
            fp.write('{}\n'.format(arg))

    if self.runjava(classpath=self._zinc.zinc,
                    main=Zinc.ZINC_COMPILE_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name=self.name(),
                    workunit_labels=[WorkUnitLabel.COMPILER]):
        raise TaskError('Zinc compile failed.')
def _verify_zinc_classpath(self, classpath):
def is_outside(path, putative_parent):
return os.path.relpath(path, putative_parent).startswith(os.pardir)
for path in classpath:
if not os.path.isabs(path):
raise TaskError('Classpath entries provided to zinc should be absolute. '
'{} is not.'.format(path))
if is_outside(path, self.get_options().pants_workdir) and is_outside(path, self.dist.home):
raise TaskError('Classpath entries provided to zinc should be in working directory or '
'part of the JDK. {} is not.'.format(path))
if path != os.path.normpath(path):
raise TaskError('Classpath entries provided to zinc should be normalized '
'(i.e. without ".." and "."). {} is not.'.format(path))
def log_zinc_file(self, analysis_file):
self.context.log.debug('Calling zinc on: {} ({})'
.format(analysis_file,
hash_file(analysis_file).upper()
if os.path.exists(analysis_file)
else 'nonexistent'))
@classmethod
def _javac_plugin_args(cls, javac_plugin_map):
ret = []
for plugin, args in javac_plugin_map.items():
for arg in args:
if ' ' in arg:
# Note: Args are separated by spaces, and there is no way to escape embedded spaces, as
# javac's Main does a simple split on these strings.
raise TaskError('javac plugin args must not contain spaces '
'(arg {} for plugin {})'.format(arg, plugin))
ret.append('-C-Xplugin:{} {}'.format(plugin, ' '.join(args)))
return ret
def _scalac_plugin_args(self, scalac_plugin_map, classpath):
if not scalac_plugin_map:
return []
plugin_jar_map = self._find_scalac_plugins(scalac_plugin_map.keys(), classpath)
ret = []
for name, cp_entries in plugin_jar_map.items():
# meaning that this is the plugin that will be loaded, even if there happen to be other
# plugins in the list of entries (e.g., because this plugin depends on another plugin).
ret.append('-S-Xplugin:{}'.format(':'.join(cp_entries)))
for arg in scalac_plugin_map[name]:
ret.append('-S-P:{}:{}'.format(name, arg))
return ret
def _find_scalac_plugins(self, scalac_plugins, classpath):
    """Returns a map from plugin name to list of plugin classpath entries.

    The first entry in each list is the classpath entry containing the plugin
    metadata; the rest are the plugin's internal transitive deps.
    Raises TaskError on conflicting plugin definitions or unresolved plugins.
    """
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in scalac_plugins for p in val.split(',')])
    if not plugin_names:
        return {}

    active_plugins = {}
    buildroot = get_buildroot()

    cp_product = self.context.products.get_data('runtime_classpath')
    for classpath_element in classpath:
        name = self._maybe_get_plugin_name(classpath_element)
        if name in plugin_names:
            plugin_target_closure = self._plugin_targets('scalac').get(name, [])
            # It's important to use relative paths, as the compiler flags get embedded in the zinc
            # analysis file, and artifacts are ported between systems via the artifact cache.
            rel_classpath_elements = [
                os.path.relpath(cpe, buildroot) for cpe in
                ClasspathUtil.internal_classpath(plugin_target_closure, cp_product, self._confs)]
            # External plugins have no internal classpath; fall back to the jar itself.
            rel_classpath_elements = rel_classpath_elements or [classpath_element]
            # Repeated classpath elements are fine, but a *different* entry set
            # for the same plugin name is a conflict.
            if active_plugins.get(name, rel_classpath_elements) != rel_classpath_elements:
                raise TaskError('Plugin {} defined in {} and in {}'.format(name, active_plugins[name],
                                                                           classpath_element))
            active_plugins[name] = rel_classpath_elements
            if len(active_plugins) == len(plugin_names):
                # All plugins found: stop early instead of scanning the rest
                # of the classpath for no reason.
                return active_plugins

    # If we get here we must have unresolved plugins.
    unresolved_plugins = plugin_names - set(active_plugins.keys())
    raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))
@classmethod
@memoized_method
def _maybe_get_plugin_name(cls, classpath_element):
    """If classpath_element is a scalac plugin, returns its name; else None.

    Handles both exploded directories and jars.
    """
    def process_info_file(cp_elem, info_file):
        # Parse scalac-plugin.xml and extract the plugin <name>.
        plugin_info = ElementTree.parse(info_file).getroot()
        if plugin_info.tag != 'plugin':
            raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
                _SCALAC_PLUGIN_INFO_FILE, cp_elem))
        return plugin_info.find('name').text

    if os.path.isdir(classpath_element):
        try:
            with open(os.path.join(classpath_element, _SCALAC_PLUGIN_INFO_FILE)) as plugin_info_file:
                return process_info_file(classpath_element, plugin_info_file)
        except IOError as e:
            # A missing descriptor just means "not a plugin"; anything else is a real error.
            if e.errno != errno.ENOENT:
                raise
    else:
        with open_zip(classpath_element, 'r') as jarfile:
            try:
                with closing(jarfile.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
                    return process_info_file(classpath_element, plugin_info_file)
            except KeyError:
                # Jar has no descriptor entry: not a plugin.
                pass
    return None
class ZincCompile(BaseZincCompile):
    """Compile Scala and Java code to classfiles using Zinc."""

    @classmethod
    def register_options(cls, register):
        super(ZincCompile, cls).register_options(register)
        # javac plugins: names, per-plugin args, and an extra search classpath.
        register('--javac-plugins', advanced=True, type=list, fingerprint=True,
                 help='Use these javac plugins.')
        register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
                 help='Map from javac plugin name to list of arguments for that plugin.')
        cls.register_jvm_tool(register, 'javac-plugin-dep', classpath=[],
                              help='Search for javac plugins here, as well as in any '
                                   'explicit dependencies.')
        # scalac plugins: mirror of the javac options above.
        register('--scalac-plugins', advanced=True, type=list, fingerprint=True,
                 help='Use these scalac plugins.')
        register('--scalac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
                 help='Map from scalac plugin name to list of arguments for that plugin.')
        cls.register_jvm_tool(register, 'scalac-plugin-dep', classpath=[],
                              help='Search for scalac plugins here, as well as in any '
                                   'explicit dependencies.')

    @classmethod
    def product_types(cls):
        # Products that downstream tasks may require from this one.
        return ['runtime_classpath', 'classes_by_source', 'product_deps_by_src', 'zinc_args']

    @memoized_method
    def extra_compile_time_classpath_elements(self):
        # javac plugins are loaded from the regular class entries containing javac plugins,
        # so we can provide them here.
        # Note that, unlike javac, scalac plugins are not loaded from the regular classpath,
        # so we don't provide them here.
        return self.tool_classpath('javac-plugin-dep')

    @memoized_method
    def scalac_plugin_classpath_elements(self):
        """Classpath entries containing scalac plugins."""
        return self.tool_classpath('scalac-plugin-dep')

    def select(self, target):
        # Require JVM targets, to differentiate from targets owned by other
        # backends (e.g. the scalajs contrib module).
        if not isinstance(target, JvmTarget):
            return False
        return target.has_sources('.java') or target.has_sources('.scala')

    def select_source(self, source_file_path):
        """True for .java and .scala source files."""
        return source_file_path.endswith('.java') or source_file_path.endswith('.scala')
| true | true |
f7fc6c4e5b5689fc7d3141bbbe2b805bc50f0486 | 1,061 | py | Python | minigest/docfisc/views/fatture_acquisto.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | minigest/docfisc/views/fatture_acquisto.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | 1 | 2021-09-22T19:10:20.000Z | 2021-09-22T19:10:20.000Z | minigest/docfisc/views/fatture_acquisto.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseNotFound
from django.shortcuts import render
from django.views import View
from minigest.anagrafica.models import SoggettoFiscale as Azienda
from minigest.docfisc.models import Fattura
class FattureAcquisto(LoginRequiredMixin, View):
    """List purchase invoices ("fatture di acquisto") for one company and period."""

    # Template rendered by GET.
    template_name = "docfisc/fatture_acquisto.html"

    def get(self, request, *args, **kwargs):
        """Render the invoices of kwargs['azienda'] for kwargs['periodo'].

        Defaults the period to the current year-month ("YYYY-MM"); returns
        404 when the company does not exist.
        """
        azienda = None
        periodo = (
            datetime.now().strftime("%Y-%m")
            if "periodo" not in kwargs
            else kwargs["periodo"]
        )
        try:
            azienda = Azienda.objects.get(pk=kwargs["azienda"])
        except Azienda.DoesNotExist:
            return HttpResponseNotFound()
        # NOTE(review): data_sdi is matched with __icontains against the
        # "YYYY-MM" string — presumably data_sdi is stored as text; confirm it
        # is not a DateField, where __year/__month lookups would be cleaner.
        fatture = Fattura.objects.filter(
            cliente=azienda, data_sdi__icontains=periodo
        ).order_by("data_sdi")
        context = {"azienda": azienda, "periodo": periodo, "fatture": fatture}
        return render(request, self.template_name, context)
| 30.314286 | 78 | 0.68049 | from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseNotFound
from django.shortcuts import render
from django.views import View
from minigest.anagrafica.models import SoggettoFiscale as Azienda
from minigest.docfisc.models import Fattura
class FattureAcquisto(LoginRequiredMixin, View):
    """List purchase invoices ("fatture di acquisto") for one company and period."""

    template_name = "docfisc/fatture_acquisto.html"

    def get(self, request, *args, **kwargs):
        # Default the period to the current "YYYY-MM" when not supplied.
        if "periodo" in kwargs:
            requested_period = kwargs["periodo"]
        else:
            requested_period = datetime.now().strftime("%Y-%m")
        # 404 when the company does not exist.
        try:
            company = Azienda.objects.get(pk=kwargs["azienda"])
        except Azienda.DoesNotExist:
            return HttpResponseNotFound()
        invoices = (
            Fattura.objects
            .filter(cliente=company, data_sdi__icontains=requested_period)
            .order_by("data_sdi")
        )
        return render(
            request,
            self.template_name,
            {"azienda": company, "periodo": requested_period, "fatture": invoices},
        )
| true | true |
f7fc6d211e3523a6b958a102f4f7e6accb98b3f4 | 870 | py | Python | examples/charges/get_charge_by_id.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 6 | 2021-09-02T19:55:04.000Z | 2022-03-16T14:06:15.000Z | examples/charges/get_charge_by_id.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-10-11T22:48:15.000Z | 2022-01-24T18:24:23.000Z | examples/charges/get_charge_by_id.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-09-12T21:43:32.000Z | 2022-03-07T16:58:54.000Z | from pagarmecoreapi.pagarmecoreapi_client import PagarmecoreapiClient
from pagarmecoreapi.controllers import *
from pagarmecoreapi.exceptions.error_exception import *
# Configuration parameters and credentials
basic_auth_user_name = 'basic_auth_user_name' # The username to use with basic authentication
basic_auth_password = 'basic_auth_password' # The password to use with basic authentication
PagarmecoreapiClient(basic_auth_user_name, basic_auth_password)
# Fix: the original rebound the sub-module name `charges_controller`
# (pulled in by the wildcard import above) to the controller instance,
# shadowing the module. Use a distinct name for the instance.
controller = charges_controller.ChargesController()

# Identifier of the charge to look up.
charge_Id = "ch_8YQ1JeTLzF8zlqWy"

try:
    # Fetch the charge and sanity-check that the API echoed the same id.
    result = controller.get_charge(charge_Id)
    assert result is not None
    assert result.id == charge_Id
    print("Charge found!")
    print("Charge_Id: ", result.id)
except ErrorException as ex:
    # API-level error reported by the Pagarme service.
    print(ex.message)
    print("Errors: ", ex.errors)
except Exception:
    # Fix: bare `raise` re-raises with the original traceback;
    # `raise ex` needlessly rewrites the raise site.
    raise
| 33.461538 | 93 | 0.796552 | from pagarmecoreapi.pagarmecoreapi_client import PagarmecoreapiClient
from pagarmecoreapi.controllers import *
from pagarmecoreapi.exceptions.error_exception import *
basic_auth_user_name = 'basic_auth_user_name'
basic_auth_password = 'basic_auth_password'
PagarmecoreapiClient(basic_auth_user_name, basic_auth_password)
charges_controller = charges_controller.ChargesController()
charge_Id = "ch_8YQ1JeTLzF8zlqWy"
try:
result = charges_controller.get_charge(charge_Id)
assert result is not None
assert result.id == charge_Id
print("Charge found!")
print("Charge_Id: ", result.id)
except ErrorException as ex:
print(ex.message)
print("Errors: ", ex.errors)
except Exception as ex:
raise ex
| true | true |
f7fc6dd3c9474d6fdbcd1d888d51be532d61f43b | 13,920 | py | Python | google/ads/google_ads/v3/services/geo_target_constant_service_client.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v3/services/geo_target_constant_service_client.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/services/geo_target_constant_service_client.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v3.services GeoTargetConstantService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import google.api_core.protobuf_helpers
from google.ads.google_ads.v3.services import geo_target_constant_service_client_config
from google.ads.google_ads.v3.services.transports import geo_target_constant_service_grpc_transport
from google.ads.google_ads.v3.proto.services import geo_target_constant_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class GeoTargetConstantServiceClient(object):
"""Service to fetch geo target constants."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v3.services.GeoTargetConstantService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GeoTargetConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def geo_target_constant_path(cls, geo_target_constant):
"""Return a fully-qualified geo_target_constant string."""
return google.api_core.path_template.expand(
'geoTargetConstants/{geo_target_constant}',
geo_target_constant=geo_target_constant,
)
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.GeoTargetConstantServiceGrpcTransport,
Callable[[~.Credentials, type], ~.GeoTargetConstantServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = geo_target_constant_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=geo_target_constant_service_grpc_transport.GeoTargetConstantServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = geo_target_constant_service_grpc_transport.GeoTargetConstantServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_geo_target_constant(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested geo target constant in full detail.
Args:
resource_name (str): Required. The resource name of the geo target constant to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v3.types.GeoTargetConstant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_geo_target_constant' not in self._inner_api_calls:
self._inner_api_calls['get_geo_target_constant'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_geo_target_constant,
default_retry=self._method_configs['GetGeoTargetConstant'].retry,
default_timeout=self._method_configs['GetGeoTargetConstant'].timeout,
client_info=self._client_info,
)
request = geo_target_constant_service_pb2.GetGeoTargetConstantRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_geo_target_constant'](request, retry=retry, timeout=timeout, metadata=metadata)
def suggest_geo_target_constants(
self,
locale,
country_code,
location_names=None,
geo_targets=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns GeoTargetConstant suggestions by location name or by resource name.
Args:
locale (Union[dict, ~google.ads.googleads_v3.types.StringValue]): If possible, returned geo targets are translated using this locale. If not,
en is used by default. This is also used as a hint for returned geo
targets.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v3.types.StringValue`
country_code (Union[dict, ~google.ads.googleads_v3.types.StringValue]): Returned geo targets are restricted to this country code.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v3.types.StringValue`
location_names (Union[dict, ~google.ads.googleads_v3.types.LocationNames]): The location names to search by. At most 25 names can be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v3.types.LocationNames`
geo_targets (Union[dict, ~google.ads.googleads_v3.types.GeoTargets]): The geo target constant resource names to filter by.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v3.types.GeoTargets`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v3.types.SuggestGeoTargetConstantsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'suggest_geo_target_constants' not in self._inner_api_calls:
self._inner_api_calls['suggest_geo_target_constants'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.suggest_geo_target_constants,
default_retry=self._method_configs['SuggestGeoTargetConstants'].retry,
default_timeout=self._method_configs['SuggestGeoTargetConstants'].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
location_names=location_names,
geo_targets=geo_targets,
)
request = geo_target_constant_service_pb2.SuggestGeoTargetConstantsRequest(
locale=locale,
country_code=country_code,
location_names=location_names,
geo_targets=geo_targets,
)
return self._inner_api_calls['suggest_geo_target_constants'](request, retry=retry, timeout=timeout, metadata=metadata)
| 46.245847 | 153 | 0.656753 |
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import google.api_core.protobuf_helpers
from google.ads.google_ads.v3.services import geo_target_constant_service_client_config
from google.ads.google_ads.v3.services.transports import geo_target_constant_service_grpc_transport
from google.ads.google_ads.v3.proto.services import geo_target_constant_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class GeoTargetConstantServiceClient(object):
    """Client for the google.ads.googleads.v3 GeoTargetConstantService.

    Generated GAPIC wrapper: fetches geo target constants and suggests
    them by location name or resource name.
    """

    # Default gRPC endpoint of the Google Ads API.
    SERVICE_ADDRESS = 'googleads.googleapis.com:443'

    # Key used to look up this service's method configuration in the
    # client_config dictionary.
    _INTERFACE_NAME = 'google.ads.googleads.v3.services.GeoTargetConstantService'

    @classmethod
    def from_service_account_file(cls, filename, *args, **kwargs):
        """Build a client from a service-account private-key JSON file."""
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs['credentials'] = credentials
        return cls(*args, **kwargs)
    # Alias kept for API compatibility with other GAPIC clients.
    from_service_account_json = from_service_account_file

    @classmethod
    def geo_target_constant_path(cls, geo_target_constant):
        """Return a fully-qualified geo_target_constant resource name."""
        return google.api_core.path_template.expand(
            'geoTargetConstants/{geo_target_constant}',
            geo_target_constant=geo_target_constant,
        )

    def __init__(self, transport=None, channel=None, credentials=None,
                 client_config=None, client_info=None):
        """Constructor.

        ``transport`` may be a transport instance or a callable producing
        one; ``channel`` and ``client_config`` are deprecated.
        ``credentials`` is mutually exclusive with a transport instance.
        """
        # `client_config` and `channel` are deprecated — warn but honor them.
        if client_config is not None:
            warnings.warn('The `client_config` argument is deprecated.',
                          PendingDeprecationWarning, stacklevel=2)
        else:
            client_config = geo_target_constant_service_client_config.config
        if channel:
            warnings.warn('The `channel` argument is deprecated; use '
                          '`transport` instead.',
                          PendingDeprecationWarning, stacklevel=2)
        # The transport handles (de)serialization and the actual RPCs.
        if transport:
            if callable(transport):
                self.transport = transport(
                    credentials=credentials,
                    default_class=geo_target_constant_service_grpc_transport.GeoTargetConstantServiceGrpcTransport,
                )
            else:
                if credentials:
                    raise ValueError(
                        'Received both a transport instance and '
                        'credentials; these are mutually exclusive.'
                    )
                self.transport = transport
        else:
            self.transport = geo_target_constant_service_grpc_transport.GeoTargetConstantServiceGrpcTransport(
                address=self.SERVICE_ADDRESS,
                channel=channel,
                credentials=credentials,
            )
        # Attach a user-agent/client-info header to every request.
        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION,
            )
        else:
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info
        # Per-RPC default retry/timeout settings parsed from client_config.
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config['interfaces'][self._INTERFACE_NAME],
        )
        # Cache of wrapped (retry/timeout-aware) transport callables,
        # populated lazily on first use of each RPC.
        self._inner_api_calls = {}

    def get_geo_target_constant(
            self,
            resource_name,
            retry=google.api_core.gapic_v1.method.DEFAULT,
            timeout=google.api_core.gapic_v1.method.DEFAULT,
            metadata=None):
        """Return the geo target constant named ``resource_name``.

        ``retry``/``timeout`` override the configured defaults; ``metadata``
        is extra gRPC metadata. Raises GoogleAPICallError / RetryError on
        failure.
        """
        # Lazily wrap the transport method with retry/timeout logic.
        if 'get_geo_target_constant' not in self._inner_api_calls:
            self._inner_api_calls['get_geo_target_constant'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.get_geo_target_constant,
                default_retry=self._method_configs['GetGeoTargetConstant'].retry,
                default_timeout=self._method_configs['GetGeoTargetConstant'].timeout,
                client_info=self._client_info,
            )
        request = geo_target_constant_service_pb2.GetGeoTargetConstantRequest(
            resource_name=resource_name,
        )
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        # Add a routing header so the backend can route by resource name.
        try:
            routing_header = [('resource_name', resource_name)]
        except AttributeError:
            pass
        else:
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
            metadata.append(routing_metadata)
        return self._inner_api_calls['get_geo_target_constant'](request, retry=retry, timeout=timeout, metadata=metadata)

    def suggest_geo_target_constants(
            self,
            locale,
            country_code,
            location_names=None,
            geo_targets=None,
            retry=google.api_core.gapic_v1.method.DEFAULT,
            timeout=google.api_core.gapic_v1.method.DEFAULT,
            metadata=None):
        """Suggest geo target constants by location name or resource name.

        Exactly one of ``location_names`` / ``geo_targets`` may be set;
        ``locale`` and ``country_code`` restrict the suggestions.
        """
        # Lazily wrap the transport method with retry/timeout logic.
        if 'suggest_geo_target_constants' not in self._inner_api_calls:
            self._inner_api_calls['suggest_geo_target_constants'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.suggest_geo_target_constants,
                default_retry=self._method_configs['SuggestGeoTargetConstants'].retry,
                default_timeout=self._method_configs['SuggestGeoTargetConstants'].timeout,
                client_info=self._client_info,
            )
        # location_names and geo_targets are mutually exclusive oneof fields.
        google.api_core.protobuf_helpers.check_oneof(
            location_names=location_names,
            geo_targets=geo_targets,
        )
        request = geo_target_constant_service_pb2.SuggestGeoTargetConstantsRequest(
            locale=locale,
            country_code=country_code,
            location_names=location_names,
            geo_targets=geo_targets,
        )
        return self._inner_api_calls['suggest_geo_target_constants'](request, retry=retry, timeout=timeout, metadata=metadata)
| true | true |
f7fc6e41b013f84c69c048468acdb03cc6f422c1 | 576 | py | Python | base/migrations/0002_orderitem_user.py | vishnu-sagubandi/LIAVYS_BACKEND | 34c8a28d741cf6ca47ce1bcca045c3256945e309 | [
"MIT"
] | null | null | null | base/migrations/0002_orderitem_user.py | vishnu-sagubandi/LIAVYS_BACKEND | 34c8a28d741cf6ca47ce1bcca045c3256945e309 | [
"MIT"
] | null | null | null | base/migrations/0002_orderitem_user.py | vishnu-sagubandi/LIAVYS_BACKEND | 34c8a28d741cf6ca47ce1bcca045c3256945e309 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-03 12:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| 26.181818 | 122 | 0.670139 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``user`` foreign key to the ``OrderItem`` model."""

    dependencies = [
        # The user model is swappable, so depend on AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('base', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='orderitem',
            name='user',
            # SET_NULL keeps order items around when the owning user is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| true | true |
f7fc6e66f890099ef1128bbc0f90165ab1a1e187 | 1,784 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/lambda-code/ServiceLambda/api/dashboard.py | brianherrera/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemMetric/v1/AWS/lambda-code/ServiceLambda/api/dashboard.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemMetric/v1/AWS/lambda-code/ServiceLambda/api/dashboard.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from __future__ import print_function
import service
import os
import util
import metric_constant as c
import json
from dynamodb import DynamoDb
from cgf_utils import custom_resource_response
@service.api
def get(request, facetid):
    """Return the stored metric context for ``facetid`` as a JSON string.

    Values are serialized with the project's DynamoDB-aware JSON decoder
    (see ``util.DynamoDbDecoder``).
    """
    store = DynamoDb()
    payload = store.get_key(facetid)
    return json.dumps(payload, cls=util.DynamoDbDecoder)
@service.api
def post(request, facetid, meta):
    """Store ``meta`` as the metric context for ``facetid``."""
    return DynamoDb().set(facetid, meta)
def cli(context, args):
    """Command-line entry point that mirrors the service API for local runs.

    Sets up the environment variables the handlers expect, then invokes the
    function named by ``args.function`` with ``args.param``.
    """
    util.set_logger(args.verbose)
    from resource_manager_common import constant
    credentials = context.aws.load_credentials()
    resources = util.get_resources(context)
    # Populate the environment the lambda-style handlers read at runtime.
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    # Prefer keys passed on the CLI; otherwise fall back to the credentials
    # of the selected (or default) profile.
    os.environ["AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ["AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    # SECURITY: eval() executes arbitrary text supplied on the command line.
    # This is acceptable only for trusted local dev tooling — never expose
    # this entry point to untrusted input.
    print(eval(args.function)({}, args.param))
| 37.957447 | 197 | 0.767937 |
from __future__ import print_function
import service
import os
import util
import metric_constant as c
import json
from dynamodb import DynamoDb
from cgf_utils import custom_resource_response
@service.api
def get(request, facetid):
db = DynamoDb()
result = db.get_key(facetid)
result = json.dumps(result, cls=util.DynamoDbDecoder)
return result
@service.api
def post(request, facetid, meta):
db = DynamoDb()
return db.set(facetid, meta)
def cli(context, args):
util.set_logger(args.verbose)
from resource_manager_common import constant
credentials = context.aws.load_credentials()
resources = util.get_resources(context)
os.environ[c.ENV_REGION] = context.config.project_region
os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
os.environ["AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
os.environ["AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.SECRET_KEY_OPTION)
print(eval(args.function)({}, args.param))
| true | true |
f7fc6e967a45b9376a2835c57c448e5de9227bfd | 622 | py | Python | tools/_init_paths.py | Buki2/MAttNet | 8abae2ba521de494b45ac6f1821602d6e17a6c59 | [
"MIT"
] | null | null | null | tools/_init_paths.py | Buki2/MAttNet | 8abae2ba521de494b45ac6f1821602d6e17a6c59 | [
"MIT"
] | null | null | null | tools/_init_paths.py | Buki2/MAttNet | 8abae2ba521de494b45ac6f1821602d6e17a6c59 | [
"MIT"
] | null | null | null | import os.path as osp
import sys
# Put the bundled faster-rcnn / refer checkouts ahead of anything installed
# (insert at position 0 so they shadow same-named installed packages).
this_dir = osp.dirname(__file__)
# mrcn path (earlier backends used: mask-faster-rcnn, tf-faster-rcnn)
mrcn_dir = osp.join(this_dir, '..', 'pyutils', 'pytorch-faster-rcnn')
for parts in (('lib',), ('data', 'refer'), ('data', 'coco', 'PythonAPI')):
    sys.path.insert(0, osp.join(mrcn_dir, *parts))
# refer path
refer_dir = osp.join(this_dir, '..', 'pyutils', 'refer')
sys.path.insert(0, refer_dir)
# model path
sys.path.insert(0, osp.join(this_dir, '..', 'lib')) | 34.555556 | 69 | 0.672026 | import os.path as osp
import sys
this_dir = osp.dirname(__file__)
mrcn_dir = osp.join(this_dir, '..', 'pyutils', 'pytorch-faster-rcnn')
sys.path.insert(0, osp.join(mrcn_dir, 'lib'))
sys.path.insert(0, osp.join(mrcn_dir, 'data', 'refer'))
sys.path.insert(0, osp.join(mrcn_dir, 'data', 'coco', 'PythonAPI'))
refer_dir = osp.join(this_dir, '..', 'pyutils', 'refer')
sys.path.insert(0, refer_dir)
sys.path.insert(0, osp.join(this_dir, '..', 'lib')) | true | true |
f7fc6ffc11d4d4e152350d055d1b9d727c367d08 | 3,617 | py | Python | tests/models/input/types/test_list_input.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 1 | 2020-12-30T12:43:34.000Z | 2020-12-30T12:43:34.000Z | tests/models/input/types/test_list_input.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 59 | 2019-12-25T13:14:56.000Z | 2021-07-22T12:24:46.000Z | tests/models/input/types/test_list_input.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 2 | 2020-05-24T06:44:27.000Z | 2020-07-09T15:47:31.000Z | from django.core.exceptions import ValidationError
from django.test import TestCase
from django_analyses.models.input.types.input_types import InputTypes
from django_analyses.models.input.utils import ListElementTypes
from tests.factories.input.definitions.list_input_definition import \
ListInputDefinitionFactory
from tests.factories.input.types.list_input import ListInputFactory
class ListInputTestCase(TestCase):
"""
Tests for the :class:`~django_analyses.models.input.types.list_input.ListInput` model.
"""
def setUp(self):
"""
Adds the created instances to the tests' contexts.
For more information see unittest's :meth:`~unittest.TestCase.setUp` method.
"""
self.list_input_definition = ListInputDefinitionFactory(
min_length=2, max_length=5, element_type=ListElementTypes.INT.name,
)
self.list_input = ListInputFactory(
definition=self.list_input_definition, value=[1, 2, 3, 4],
)
###########
# Methods #
###########
def test_string(self):
value = str(self.list_input)
expected = f"'{self.list_input.key}' = {self.list_input.value}"
self.assertEqual(value, expected)
def test_none_value_if_required_raises_validation_error(self):
self.list_input.definition.required = True
self.list_input.definition.save()
self.list_input.value = None
with self.assertRaises(ValidationError):
self.list_input.save()
def test_get_type(self):
value = self.list_input.get_type()
self.assertEqual(value, InputTypes.LST)
def test_min_length_raises_validation_error(self):
self.list_input.value = [0]
with self.assertRaises(ValidationError):
self.list_input.save()
def test_max_length_raises_validation_error(self):
self.list_input.value += [5, 6]
with self.assertRaises(ValidationError):
self.list_input.save()
def test_non_list_values_raise_validation_error(self):
self.list_input.value = 0
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = 0.4
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = "a"
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = False
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = True
with self.assertRaises(ValidationError):
self.list_input.save()
def test_wrong_type_elements_raise_validation_error(self):
self.list_input.value = ["a", "b", "c"]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [0, 1, "c"]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [True, 0, 1]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [False, 0, 1]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [0, 1, 1.1]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [0, 1, []]
with self.assertRaises(ValidationError):
self.list_input.save()
self.list_input.value = [0, {}, 1]
with self.assertRaises(ValidationError):
self.list_input.save()
| 36.535354 | 90 | 0.65828 | from django.core.exceptions import ValidationError
from django.test import TestCase
from django_analyses.models.input.types.input_types import InputTypes
from django_analyses.models.input.utils import ListElementTypes
from tests.factories.input.definitions.list_input_definition import \
ListInputDefinitionFactory
from tests.factories.input.types.list_input import ListInputFactory
class ListInputTestCase(TestCase):
    """Tests for the ``ListInput`` input-type model.

    Fix: in this copy the dump truncated the start of ``test_string``
    (its ``def`` line and first two statements were fused into one garbled
    line); the method is restored from the intact copy of this file.
    """

    def setUp(self):
        """Create a 2-5 element integer-list definition and a valid input."""
        self.list_input_definition = ListInputDefinitionFactory(
            min_length=2, max_length=5, element_type=ListElementTypes.INT.name,
        )
        self.list_input = ListInputFactory(
            definition=self.list_input_definition, value=[1, 2, 3, 4],
        )

    def test_string(self):
        # str() should render as: 'key' = [values].
        value = str(self.list_input)
        expected = f"'{self.list_input.key}' = {self.list_input.value}"
        self.assertEqual(value, expected)

    def test_none_value_if_required_raises_validation_error(self):
        # A required definition must reject a None value on save.
        self.list_input.definition.required = True
        self.list_input.definition.save()
        self.list_input.value = None
        with self.assertRaises(ValidationError):
            self.list_input.save()

    def test_get_type(self):
        value = self.list_input.get_type()
        self.assertEqual(value, InputTypes.LST)

    def test_min_length_raises_validation_error(self):
        # Definition enforces min_length=2.
        self.list_input.value = [0]
        with self.assertRaises(ValidationError):
            self.list_input.save()

    def test_max_length_raises_validation_error(self):
        # Definition enforces max_length=5; 4 + 2 elements exceeds it.
        self.list_input.value += [5, 6]
        with self.assertRaises(ValidationError):
            self.list_input.save()

    def test_non_list_values_raise_validation_error(self):
        # Scalars (including bools) are not acceptable list values.
        self.list_input.value = 0
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = 0.4
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = "a"
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = False
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = True
        with self.assertRaises(ValidationError):
            self.list_input.save()

    def test_wrong_type_elements_raise_validation_error(self):
        # Every element must match the declared INT element type; bools,
        # floats, strings and containers must all be rejected.
        self.list_input.value = ["a", "b", "c"]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [0, 1, "c"]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [True, 0, 1]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [False, 0, 1]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [0, 1, 1.1]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [0, 1, []]
        with self.assertRaises(ValidationError):
            self.list_input.save()
        self.list_input.value = [0, {}, 1]
        with self.assertRaises(ValidationError):
            self.list_input.save()
| true | true |
f7fc71a1a0870e917d58bfac6f67f255356963b5 | 2,193 | py | Python | xsoccer/games/models.py | awyrough/xsoccer | 5f3d5f73ede21f6ba3baa0089a821b0a8d151b8d | [
"MIT"
] | null | null | null | xsoccer/games/models.py | awyrough/xsoccer | 5f3d5f73ede21f6ba3baa0089a821b0a8d151b8d | [
"MIT"
] | null | null | null | xsoccer/games/models.py | awyrough/xsoccer | 5f3d5f73ede21f6ba3baa0089a821b0a8d151b8d | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Game(models.Model):
    """
    An Opta game.
    """
    # Opta UUID (NB: Different than Django-generated pk/id)
    uuid = models.CharField("Opta uID", max_length=255, unique=True)
    date = models.DateField("Date Played")
    attendance = models.IntegerField("Attendance", null=True, blank=True)
    venue = models.ForeignKey('venues.Venue', on_delete=models.PROTECT, related_name="game_venue")
    home_team = models.ForeignKey('teams.Team', on_delete=models.PROTECT, related_name="game_home_team")
    away_team = models.ForeignKey('teams.Team', on_delete=models.PROTECT, related_name="game_away_team")
    winner = models.ForeignKey('teams.Team', on_delete=models.SET_NULL, null=True, blank=True, related_name="game_winner")
    first_half_time = models.IntegerField("1st Half Length (mins)")
    second_half_time = models.IntegerField("2nd Half Length (mins)")
    home_team_score = models.IntegerField("Final Score of Home Team", null=True)
    away_team_score = models.IntegerField("Final Score of Away Team", null=True)

    def __str__(self):
        return "%s at %s on %s (%s)" % (
            self.away_team, self.home_team, self.date, self.uuid)

    def location(self):
        """Return the game venue (always populated for Opta games)."""
        if self.venue:
            return self.venue
        else:
            return None

    def game_length(self):
        """Total regulation minutes played (both halves)."""
        return self.first_half_time + self.second_half_time

    def atmosphere(self):
        """Attendance as a percentage of venue capacity.

        NOTE(review): assumes ``attendance`` is recorded (non-NULL) and
        ``venue.capacity`` is non-zero — confirm callers guard for this.
        """
        return (float(self.attendance) / float(self.venue.capacity)) * 100

    def team_score(self, team_uuid):
        """Return the final score of the team identified by ``team_uuid``.

        Bug fix: this method previously lacked ``self`` and compared against
        the bare names ``home_team``/``away_team``, so any call raised
        NameError; it also returned a sentinel string on a mismatched uuid.
        """
        if team_uuid == self.home_team.uuid:
            return self.home_team_score
        elif team_uuid == self.away_team.uuid:
            return self.away_team_score
        # An explicit exception is safer for callers than the previous
        # sentinel string ("Your logic is wrong").
        raise ValueError("team_uuid %r belongs to neither team in this game" % team_uuid)

    # Give us the ability to manually update DB instances at any point
    # (e.g. to patch final scores) — saves only the fields passed in.
    def update(self, **kwargs):
        if self._state.adding:
            raise self.DoesNotExist
        for field, value in kwargs.items():
            setattr(self, field, value)
        self.save(update_fields=kwargs.keys())

    class Meta:
        unique_together = ["home_team", "away_team", "date"]
| 36.55 | 119 | 0.735978 | from __future__ import unicode_literals
from django.db import models
class Game(models.Model):
uuid = models.CharField("Opta uID", max_length=255, unique=True)
date = models.DateField("Date Played")
attendance = models.IntegerField("Attendance", null=True, blank=True)
venue = models.ForeignKey('venues.Venue', on_delete=models.PROTECT, related_name="game_venue")
home_team = models.ForeignKey('teams.Team', on_delete=models.PROTECT, related_name="game_home_team")
away_team = models.ForeignKey('teams.Team', on_delete=models.PROTECT, related_name="game_away_team")
winner = models.ForeignKey('teams.Team', on_delete=models.SET_NULL, null=True, blank=True, related_name="game_winner")
first_half_time = models.IntegerField("1st Half Length (mins)")
second_half_time = models.IntegerField("2nd Half Length (mins)")
home_team_score = models.IntegerField("Final Score of Home Team", null=True)
away_team_score = models.IntegerField("Final Score of Away Team", null=True)
def __str__(self):
return "%s at %s on %s (%s)" % (
self.away_team, self.home_team, self.date, self.uuid)
def location(self):
if self.venue:
return self.venue
else:
return None
def game_length(self):
return self.first_half_time + self.second_half_time
def atmosphere(self):
return (float(self.attendance) / float(self.venue.capacity)) * 100
def team_score(team_uuid):
if team_uuid == home_team.uuid:
return home_team_score
elif team_uuid == away_team.uuid:
return away_team_score
else:
return "Your logic is wrong"
def update(self, **kwargs):
if self._state.adding:
raise self.DoesNotExist
for field, value in kwargs.items():
setattr(self, field, value)
self.save(update_fields=kwargs.keys())
class Meta:
unique_together = ["home_team", "away_team", "date"]
| true | true |
f7fc72ed5e5d6f7fdb5691e04ef154a07fc19d2e | 10,785 | py | Python | upstream/test/functional-tests-legacy/PfwTestCase/Types/tUINT16.py | TinkerEdgeR-Android/external_parameter-framework | 108db75a59dbea562ac4bcaf8c6cc862c4919af0 | [
"BSD-3-Clause"
] | null | null | null | upstream/test/functional-tests-legacy/PfwTestCase/Types/tUINT16.py | TinkerEdgeR-Android/external_parameter-framework | 108db75a59dbea562ac4bcaf8c6cc862c4919af0 | [
"BSD-3-Clause"
] | null | null | null | upstream/test/functional-tests-legacy/PfwTestCase/Types/tUINT16.py | TinkerEdgeR-Android/external_parameter-framework | 108db75a59dbea562ac4bcaf8c6cc862c4919af0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - UINT16
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT16 :
- unsigned
- size = 16
- range : [0, 1000]
Test cases :
------------
- UINT16 parameter min value = 0
- UINT16 parameter min value out of bounds = -1
- UINT16 parameter max value = 1000
- UINT16 parameter max value out of bounds = 1001
- UINT16 parameter in nominal case = 50
"""
import os
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type UINT16 - range [0, 1000]
class TestCases(PfwTestCase):
    """Tests of the UINT16 parameter type (unsigned, 16 bits, range [0, 1000])."""

    def setUp(self):
        self.param_name = "/Test/Test/TEST_DIR/UINT16"
        self.pfw.sendCmd("setTuningMode", "on")

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def _filesystem_value(self):
        """Return the parameter value mirrored on the filesystem.

        The trailing newline written by the parameter framework is stripped.
        """
        return open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1]

    def _check_set_in_bounds(self, value, hex_value):
        """Set the parameter to an in-range ``value`` and verify the update.

        Checks that [setParameter] succeeds, that [getParameter] reports
        ``value`` on the blackboard, and that the filesystem mirror holds
        the hexadecimal form ``hex_value``.
        """
        # Set parameter value
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == "Done", log.F("when setting parameter %s : %s"
                                    % (self.param_name, out))
        # Check parameter value on blackboard
        out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
                                   % (self.param_name, value, out))
        # Check parameter value on filesystem
        assert self._filesystem_value() == hex_value, log.F("FILESYSTEM : parameter update error")
        log.I("test OK")

    def _check_set_out_of_bounds(self, value):
        """Try to set the parameter to an out-of-range ``value``.

        Checks that [setParameter] reports an error and that the filesystem
        mirror is left unchanged.
        """
        param_check = self._filesystem_value()
        # Set parameter value, expecting the PFW to reject it
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
                                    % (self.param_name))
        # Check parameter value on filesystem
        assert self._filesystem_value() == param_check, log.F("FILESYSTEM : Forbiden parameter change")
        log.I("test OK")

    def test_Nominal_Case(self):
        """
        Testing UINT16 in nominal case = 50
        -----------------------------------
        Set the UINT16 parameter to 50 with [setParameter] and check it with
        [getParameter]. Expected: parameter set to 50, blackboard and
        filesystem values checked.
        """
        log.D(self.test_Nominal_Case.__doc__)
        log.I("UINT16 parameter in nominal case = 50")
        self._check_set_in_bounds("50", "0x32")

    def test_TypeMin(self):
        """
        Testing UINT16 minimal value = 0
        --------------------------------
        Set the UINT16 parameter to its minimum 0 with [setParameter] and
        check it with [getParameter]. Expected: parameter set to 0,
        blackboard and filesystem values checked.
        """
        log.D(self.test_TypeMin.__doc__)
        log.I("UINT16 parameter min value = 0")
        self._check_set_in_bounds("0", "0x0")

    def test_TypeMin_Overflow(self):
        """
        Testing UINT16 parameter value out of negative range
        ----------------------------------------------------
        Set the UINT16 parameter to -1 with [setParameter]. Expected: error
        detected, parameter not updated, filesystem value checked.
        """
        log.D(self.test_TypeMin_Overflow.__doc__)
        log.I("UINT16 parameter min value out of bounds = -1")
        self._check_set_out_of_bounds("-1")

    def test_TypeMax(self):
        """
        Testing UINT16 parameter maximum value
        --------------------------------------
        Set the UINT16 parameter to its maximum 1000 with [setParameter] and
        check it with [getParameter]. Expected: parameter set to 1000,
        blackboard and filesystem values checked.
        """
        log.D(self.test_TypeMax.__doc__)
        log.I("UINT16 parameter max value = 1000")
        self._check_set_in_bounds("1000", "0x3e8")

    def test_TypeMax_Overflow(self):
        """
        Testing UINT16 parameter value out of positive range
        ----------------------------------------------------
        Set the UINT16 parameter to 1001 with [setParameter]. Expected: error
        detected, parameter not updated, filesystem value checked.
        """
        log.D(self.test_TypeMax_Overflow.__doc__)
        log.I("UINT16 parameter max value out of bounds = 1001")
        self._check_set_out_of_bounds("1001")
| 42.968127 | 133 | 0.538248 |
import os
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/UINT16"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
log.D(self.test_Nominal_Case.__doc__)
log.I("UINT16 parameter in nominal case = 50")
value = "50"
hex_value = "0x32"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
assert open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
log.D(self.test_TypeMin.__doc__)
log.I("UINT16 parameter min value = 0")
value = "0"
hex_value = "0x0"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
assert open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("UINT16 parameter min value out of bounds = -1")
value = "-1"
param_check = open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1]
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
assert open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1] == param_check, log.F("FILESYSTEM : Forbiden parameter change")
log.I("test OK")
def test_TypeMax(self):
log.D(self.test_TypeMax.__doc__)
log.I("UINT16 parameter max value = 1000")
value = "1000"
hex_value = "0x3e8"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
assert open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("UINT16 parameter max value out of bounds = 1001")
value = "1001"
param_check = open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1]
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
assert open(os.environ["PFW_RESULT"] + "/UINT16").read()[:-1] == param_check, log.F("FILESYSTEM : Forbiden parameter change")
log.I("test OK")
| true | true |
f7fc7465e40ae9c9761950d5879c2d974be5b2a8 | 1,436 | py | Python | python_utils/gunicorn/log/loggers.py | thegreendog/python-utils | e3d20d7a53880cc2276e6254b6fed74b43eb894b | [
"MIT"
] | 3 | 2018-09-27T09:59:07.000Z | 2020-04-13T03:21:01.000Z | python_utils/gunicorn/log/loggers.py | thegreendog/python-utils | e3d20d7a53880cc2276e6254b6fed74b43eb894b | [
"MIT"
] | null | null | null | python_utils/gunicorn/log/loggers.py | thegreendog/python-utils | e3d20d7a53880cc2276e6254b6fed74b43eb894b | [
"MIT"
] | 1 | 2020-04-13T03:21:01.000Z | 2020-04-13T03:21:01.000Z | import traceback
from gunicorn import glogging
class GunicornLogger(glogging.Logger):
    """Gunicorn logger that attaches access-log atoms as structured ``extra``
    fields so downstream handlers/formatters can consume them directly."""

    def access(self, resp, req, environ, request_time):
        """Log a single request to the access log.

        See http://httpd.apache.org/docs/2.0/logs.html#combined
        for format details.
        """
        # Nothing to do when no access-log destination is configured.
        if not (self.cfg.accesslog or self.cfg.logconfig or
                self.cfg.logconfig_dict or
                (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)):
            return

        # wrap atoms:
        # - make sure atoms will be test case insensitively
        # - if atom doesn't exist replace it by '-'
        safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ,
                                                         request_time))

        try:
            self.access_log.info(self.cfg.access_log_format, safe_atoms,
                                 extra={'remote_addr': safe_atoms.get('h'),
                                        'status_code': safe_atoms.get('s'),
                                        'http_user_agent': safe_atoms.get('a'),
                                        'request_method': safe_atoms.get('m'),
                                        'path_info': safe_atoms.get('U'),
                                        'server_protocol': safe_atoms.get('H')
                                        }
                                 )
        except Exception:
            # Bug fix: the previous bare ``except:`` also swallowed
            # SystemExit/KeyboardInterrupt. Catch only ordinary logging
            # failures and record the traceback instead of crashing the
            # worker.
            self.error(traceback.format_exc())
| 42.235294 | 86 | 0.486769 | import traceback
from gunicorn import glogging
class GunicornLogger(glogging.Logger):
def access(self, resp, req, environ, request_time):
if not (self.cfg.accesslog or self.cfg.logconfig or
self.cfg.logconfig_dict or
(self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)):
return
safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ,
request_time))
try:
self.access_log.info(self.cfg.access_log_format, safe_atoms,
extra={'remote_addr': safe_atoms.get('h'),
'status_code': safe_atoms.get('s'),
'http_user_agent': safe_atoms.get('a'),
'request_method': safe_atoms.get('m'),
'path_info': safe_atoms.get('U'),
'server_protocol': safe_atoms.get('H')
}
)
except:
self.error(traceback.format_exc())
| true | true |
f7fc747387731a0b2a60200b416d632e8bc9f068 | 11,586 | py | Python | 01_matplotlib.py | Munazza12/Python | b550e3bf8a5468602bec4f23b4d4d1f2f696342f | [
"Apache-2.0"
] | null | null | null | 01_matplotlib.py | Munazza12/Python | b550e3bf8a5468602bec4f23b4d4d1f2f696342f | [
"Apache-2.0"
] | null | null | null | 01_matplotlib.py | Munazza12/Python | b550e3bf8a5468602bec4f23b4d4d1f2f696342f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""01_Matplotlib.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kcVik8w7TtrXYaCSklntRqq2av-7N7-6
#Introduction to Matplotlib

Matplotlib is the "grandfather" library of data visualization with Python. It was created by John Hunter. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you. Matplotlib is a plotting library for Python. It is used along with NumPy to provide an environment that is an effective open source alternative for MatLab.
It is an excellent 2D and 3D graphics library for generating scientific figures.
Some of the major Pros of Matplotlib are:
* Generally easy to get started for simple plots
* Support for custom labels and texts
* Great control of every element in a figure
* High-quality output in many formats
* Very customizable in general
##Importing matplotlib
To import matplotlib in Colaboratory under the name **plt** from module **matplotlib.pyplot** type the following:
"""
import matplotlib.pyplot as plt
"""**Note:** If you are using Colaboratory **plt.show()** at the end of all the plooting commands to have the figure pop up in another window.
#Basic Example
Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays).
Let us create the data we want to plot.
"""
import numpy as np
# Sample data: 11 evenly spaced points on [0, 5] and their squares.
x = np.linspace(0, 5, 11)
y = x ** 2
# NOTE: the bare expressions below only display output in a notebook cell;
# they are no-ops when the file is run as a plain script.
x
y
"""We created 2 set of numbers **x** and **y**.
## Basic Matplotlib Commands
We can create a very simple line plot using the following:
"""
plt.plot(x, y, 'r') # 'r' is the color red
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.show()
"""## Creating Multiplots on Same Canvas"""
# plt.subplot(nrows, ncols, plot_number)
plt.subplot(1,2,1)
plt.plot(x, y, 'r*') # More on color options later
plt.subplot(1,2,2)
plt.plot(y, x, 'g*-');
"""# Matplotlib Object Oriented Method
Now that we've seen the basics, let's break it all down with a more formal introduction of Matplotlib's Object Oriented API. This means we will instantiate figure objects and then call methods or attributes from that object.
## Introduction to the Object Oriented Method
The main idea in using the more formal Object Oriented method is to create figure objects and then just call methods or attributes off of that object. This approach is nicer when dealing with a canvas that has multiple plots on it.
To begin we create a figure instance. Then we can add axes to that figure:
"""
# Create Figure (empty canvas)
fig = plt.figure()
# Add set of axes to figure
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
# Plot on that set of axes
axes.plot(x, y, 'g')
axes.set_xlabel('Set X Label') # Notice the use of set_ to begin methods
axes.set_ylabel('Set y Label')
axes.set_title('Set Title')
"""Code is a little more complicated, but the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure:"""
# Creates blank canvas
fig = plt.figure()
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes
# Larger Figure Axes 1
axes1.plot(x, y, 'b')
axes1.set_xlabel('X_label_axes2')
axes1.set_ylabel('Y_label_axes2')
axes1.set_title('Axes 2 Title')
# Insert Figure Axes 2
axes2.plot(y, x, 'r')
axes2.set_xlabel('X_label_axes2')
axes2.set_ylabel('Y_label_axes2')
axes2.set_title('Axes 2 Title');
"""## subplots()
The plt.subplots() object will act as a more automatic axis manager.
Basic use cases:
"""
# Use similar to plt.figure() except use tuple unpacking to grab fig and axes
fig, axes = plt.subplots()
# Now use the axes object to add stuff to plot
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
"""Then you can specify the number of rows and columns when creating the subplots() object:"""
# Empty canvas of 1 by 2 subplots
fig, axes = plt.subplots(nrows=1, ncols=2)
# Axes is an array of axes to plot on
axes
"""**Note:** Here you can see that **axes** is an **array** type. That means we can iterate through it."""
for ax in axes:
    ax.plot(x, y, 'b')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('title')
# Display the figure object
fig
"""A common issue with matplolib is overlapping subplots or figures. We ca use **fig.tight_layout()** or **plt.tight_layout()** method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content:"""
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
    ax.plot(x, y, 'g')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('title')
fig
plt.tight_layout()
"""## Figure size, aspect ratio and DPI
Matplotlib allows the aspect ratio, DPI and figure size to be specified when the Figure object is created. You can use the figsize and dpi keyword arguments.
* **figsize** is a tuple of the width and height of the figure in inches
* **dpi** is the dots-per-inch (pixel per inch).
For example:
"""
fig = plt.figure(figsize=(8,4), dpi=100)
"""The same arguments can also be passed to layout managers, such as the **subplots** function:"""
fig, axes = plt.subplots(figsize=(12,3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
"""## Saving figures
Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF.
To save a figure to a file we can use the **savefig** method in the **Figure** class:
"""
fig.savefig("filename.png")
"""Here we can also optionally specify the DPI and choose between different output formats:"""
fig.savefig("filename.png", dpi=200)
"""## Legends, labels and titles
Now that we have covered the basics of how to create a figure canvas and add axes instances to the canvas, let's look at how decorate a figure with titles, axis labels, and legends.
**Figure titles**
A title can be added to each axis instance in a figure. To set the title, use the **set_title** method in the axes instance:
"""
# `ax` below refers to the last axes created in the loops above.
ax.set_title("title");
"""**Axis labels**
Similarly, with the methods **set_xlabel** and **set_ylabel**, we can set the labels of the X and Y axes:
"""
ax.set_xlabel("x")
ax.set_ylabel("y");
"""## Legends
You can use the **label="label text"** keyword argument when plots or other objects are added to the figure, and then using the **legend** method without arguments to add the legend to the figure:
"""
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.plot(x, x**2, label="x**2")
ax.plot(x, x**3, label="x**3")
ax.legend()
"""The legend function takes an optional keyword argument loc that can be used to specify where in the figure the legend is to be drawn. The allowed values of loc are numerical codes for the various places the legend can be drawn. See the documentation page for details. Some of the most common loc values are:"""
# Lots of options....
ax.legend(loc=1) # upper right corner
ax.legend(loc=2) # upper left corner
ax.legend(loc=3) # lower left corner
ax.legend(loc=4) # lower right corner
# .. many more options are available
# Most common to choose
ax.legend(loc=0) # let matplotlib decide the optimal location
fig
"""## Setting colors, linewidths, linetypes
Matplotlib gives you *a lot* of options for customizing colors, linewidths, and linetypes.
With matplotlib, we can define the colors of lines and other graphical elements in a number of ways. First of all, we can use the MATLAB-like syntax where 'b' means blue, 'g' means green, etc. The MATLAB API for selecting line styles are also supported: where, for example, 'b.-' means a blue line with dots:
"""
fig, ax = plt.subplots()
ax.plot(x, x**2, 'b.-') # blue line with dots
ax.plot(x, x**3, 'g--') # green dashed line
"""### Colors with the color= parameter
We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the color and alpha keyword arguments. Alpha indicates opacity.
"""
fig, ax = plt.subplots()
ax.plot(x, x+1, color="blue", alpha=0.5) # half-transparant
ax.plot(x, x+2, color="#8B008B") # RGB hex code
ax.plot(x, x+3, color="#FF8C00")
"""### Line and marker styles
To change the line width, we can use the linewidth or lw keyword argument. The line style can be selected using the linestyle or ls keyword arguments:
"""
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(x, x+1, color="red", linewidth=0.25)
ax.plot(x, x+2, color="red", linewidth=0.50)
ax.plot(x, x+3, color="red", linewidth=1.00)
ax.plot(x, x+4, color="red", linewidth=2.00)
# possible linestype options ‘-‘, ‘–’, ‘-.’, ‘:’, ‘steps’
ax.plot(x, x+5, color="green", lw=3, linestyle='-')
ax.plot(x, x+6, color="green", lw=3, ls='-.')
ax.plot(x, x+7, color="green", lw=3, ls=':')
# custom dash
line, = ax.plot(x, x+8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ...
# possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x+ 9, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x+10, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x+11, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x+12, color="blue", lw=3, ls='--', marker='1')
# marker size and color
ax.plot(x, x+13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+16, color="purple", lw=1, ls='-', marker='s', markersize=8,
        markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green");
"""## Plot range
We can configure the ranges of the axes using the set_ylim and set_xlim methods in the axis object, or axis('tight') for automatically getting "tightly fitted" axes ranges:
"""
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x**2, x, x**3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x**2, x, x**3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x**2, x, x**3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
"""# Special Plot Types
There are many specialized plots we can create, such as barplots, histograms, scatter plots, and much more. Most of these type of plots we will actually create using seaborn, a statistical plotting library for Python. But here are a few examples of these type of plots:
"""
plt.scatter(x,y)
# NOTE: mid-file import kept as in the original notebook export.
from random import sample
data = sample(range(1, 1000), 100)
plt.hist(data)
data = [np.random.normal(0, std, 100) for std in range(1, 4)]
# rectangular box plot
plt.boxplot(data,vert=True,patch_artist=True);
"""## Further reading
* http://www.matplotlib.org - The project web page for matplotlib.
* https://github.com/matplotlib/matplotlib - The source code for matplotlib.
* http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
* http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
* http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
#Great job!
""" | 33.97654 | 450 | 0.707837 |
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 5, 11)
y = x ** 2
x
y
plt.plot(x, y, 'r')
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.show()
plt.subplot(1,2,1)
plt.plot(x, y, 'r*')
plt.subplot(1,2,2)
plt.plot(y, x, 'g*-');
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(x, y, 'g')
axes.set_xlabel('Set X Label')
axes.set_ylabel('Set y Label')
axes.set_title('Set Title')
fig = plt.figure()
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3])
axes1.plot(x, y, 'b')
axes1.set_xlabel('X_label_axes2')
axes1.set_ylabel('Y_label_axes2')
axes1.set_title('Axes 2 Title')
axes2.plot(y, x, 'r')
axes2.set_xlabel('X_label_axes2')
axes2.set_ylabel('Y_label_axes2')
axes2.set_title('Axes 2 Title');
fig, axes = plt.subplots()
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
fig, axes = plt.subplots(nrows=1, ncols=2)
axes
for ax in axes:
ax.plot(x, y, 'b')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
fig
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'g')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
fig
plt.tight_layout()
fig = plt.figure(figsize=(8,4), dpi=100)
fig, axes = plt.subplots(figsize=(12,3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
fig.savefig("filename.png")
fig.savefig("filename.png", dpi=200)
ax.set_title("title");
ax.set_xlabel("x")
ax.set_ylabel("y");
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.plot(x, x**2, label="x**2")
ax.plot(x, x**3, label="x**3")
ax.legend()
ax.legend(loc=1)
ax.legend(loc=2)
ax.legend(loc=3)
ax.legend(loc=4)
ax.legend(loc=0)
fig
fig, ax = plt.subplots()
ax.plot(x, x**2, 'b.-')
ax.plot(x, x**3, 'g--')
fig, ax = plt.subplots()
ax.plot(x, x+1, color="blue", alpha=0.5)
ax.plot(x, x+2, color="#8B008B")
ax.plot(x, x+3, color="#FF8C00")
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(x, x+1, color="red", linewidth=0.25)
ax.plot(x, x+2, color="red", linewidth=0.50)
ax.plot(x, x+3, color="red", linewidth=1.00)
ax.plot(x, x+4, color="red", linewidth=2.00)
ax.plot(x, x+5, color="green", lw=3, linestyle='-')
ax.plot(x, x+6, color="green", lw=3, ls='-.')
ax.plot(x, x+7, color="green", lw=3, ls=':')
line, = ax.plot(x, x+8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10])
ax.plot(x, x+ 9, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x+10, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x+11, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x+12, color="blue", lw=3, ls='--', marker='1')
ax.plot(x, x+13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green");
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x**2, x, x**3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x**2, x, x**3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x**2, x, x**3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
plt.scatter(x,y)
from random import sample
data = sample(range(1, 1000), 100)
plt.hist(data)
data = [np.random.normal(0, std, 100) for std in range(1, 4)]
plt.boxplot(data,vert=True,patch_artist=True);
| true | true |
f7fc74f1f0523af27081e1fde1407266088d0bc7 | 1,641 | py | Python | pydef_core/follow_convergence_OSZICAR.py | wangvei/PyDEF-2.0 | ee5bd0a01d241e5df45ccbf10f3c27e1b796418b | [
"MIT"
] | 1 | 2019-01-12T14:30:21.000Z | 2019-01-12T14:30:21.000Z | pydef_core/follow_convergence_OSZICAR.py | wangvei/PyDEF-2.0 | ee5bd0a01d241e5df45ccbf10f3c27e1b796418b | [
"MIT"
] | null | null | null | pydef_core/follow_convergence_OSZICAR.py | wangvei/PyDEF-2.0 | ee5bd0a01d241e5df45ccbf10f3c27e1b796418b | [
"MIT"
] | 3 | 2019-03-01T15:15:00.000Z | 2020-04-19T13:30:41.000Z | """
12/12/17
Simple script to follow convergence from OSZICAR file
author: Adrien Stoliaroff
email: adrien.stoliaroff@cnrs-imn.fr
"""
import matplotlib.pyplot as plt
def isInt(s):
    """Return True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
# Read OSZICAR content.
eList, dEList, dEpsList, rmsList, eSCF, dESCF = [], [], [], [], [], []
# 'with' guarantees the file handle is closed even if parsing raises
# (the original left the handle open for the life of the interpreter).
with open("OSZICAR", "r") as infile:
    for line in infile:
        # 'd eps' is the only two-word column header; fuse it so the
        # whitespace split keeps the columns aligned.
        content = line.replace('d eps', 'deps').split()
        # A line inside an SCF cycle has an integer iteration counter as its
        # second token; the len() guard keeps blank/short lines from raising
        # IndexError (the original crashed on such lines).
        if len(content) > 1 and isInt(content[1]):
            eList.append(float(content[2]))
            dEList.append(float(content[3]))
            dEpsList.append(float(content[4]))
            # The RMS column can fuse with the following negative number
            # (e.g. "0.123E+01-0.456"); keep only the leading RMS part.
            rmsList.append(float(content[6].split('-0.')[0]))
        # The end-of-SCF-cycle summary line starts with the integer ionic
        # step index; energies sit at fixed token positions on that line.
        if content and isInt(content[0]):
            eSCF.append(float(line.split()[4]))
            dESCF.append(float(line.split()[7].replace('=', '')))
# Plot result: three stacked panels, one shared legend/colors table.
legend = ['E', 'dE', 'd eps', 'RMS', 'E_SCF', 'd E_SCF']
colors = ['r', 'b', 'g', 'c', 'm', 'y']
i = 0  # running index into legend/colors across all three panels
plt.subplot(311)
for curve in [eList, dEList]:
    plt.plot(range(len(curve)), curve, label=legend[i], color=colors[i])
    i += 1
plt.xlabel('Iteration')
plt.ylabel('Energies (eV)')
plt.title('Convergence follow up from OSZICAR')
plt.legend()
plt.subplot(312)
for curve in [dEpsList, rmsList]:
    plt.plot(range(len(curve)), curve, label=legend[i], color=colors[i])
    i += 1
plt.xlabel('Iteration')
plt.ylabel('Convergence')
plt.legend()
plt.subplot(313)
for curve in [eSCF, dESCF]:
    plt.plot(range(len(curve)), curve, label=legend[i], marker='o', linestyle='--', color=colors[i])
    i += 1
plt.xlabel('SCF cycle')
plt.ylabel('Energies (eV)')
plt.legend()
fig = plt.gcf()
fig.show()
| 23.782609 | 105 | 0.636807 | import matplotlib.pyplot as plt
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
infile = open("OSZICAR","r")
eList, dEList, dEpsList, rmsList, eSCF, dESCF = [], [], [], [], [], []
for line in infile:
content = line.replace('d eps','deps').split()
if(isInt(content[1])):
eList.append(float(content[2]))
dEList.append(float(content[3]))
dEpsList.append(float(content[4]))
rmsList.append(float(content[6].split('-0.')[0]))
if(isInt(content[0])):
eSCF.append(float(line.split()[4]))
dESCF.append(float(line.split()[7].replace('=','')))
legend = ['E', 'dE', 'd eps', 'RMS', 'E_SCF', 'd E_SCF']
colors = ['r', 'b', 'g', 'c', 'm', 'y']
i = 0
plt.subplot(311)
for curve in [eList, dEList]:
plt.plot(range(len(curve)), curve, label = legend[i], color = colors[i])
i += 1
plt.xlabel('Iteration')
plt.ylabel('Energies (eV)')
plt.title('Convergence follow up from OSZICAR')
plt.legend()
plt.subplot(312)
for curve in [dEpsList, rmsList]:
plt.plot(range(len(curve)), curve, label = legend[i], color = colors[i])
i += 1
plt.xlabel('Iteration')
plt.ylabel('Convergence')
plt.legend()
plt.subplot(313)
for curve in [eSCF, dESCF]:
plt.plot(range(len(curve)), curve, label = legend[i], marker = 'o', linestyle = '--', color = colors[i])
i += 1
plt.xlabel('SCF cycle')
plt.ylabel('Energies (eV)')
plt.legend()
fig=plt.gcf()
fig.show()
| true | true |
f7fc758c45c2bcf614451febcd14664fc1b6a9c0 | 1,659 | py | Python | examples/mnist_lenet_multigpu.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | 1 | 2019-01-06T09:49:42.000Z | 2019-01-06T09:49:42.000Z | examples/mnist_lenet_multigpu.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | 3 | 2020-10-14T14:43:33.000Z | 2022-02-09T23:46:40.000Z | examples/mnist_lenet_multigpu.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | null | null | null | """
An example of a LeNet convolutional network with SGD back-prop on MNIST data.
"""
import deepnodal as dn
from time import time
import datetime
# PARAMETERS
n_epochs = 20  # full passes over the training set
batch_size = 60  # training mini-batch size
learning_rate = 0.01  # learning rate for the single training schedule
input_dims = [28, 28, 1]  # MNIST input: 28x28, one channel
# Layer stack spec consumed by dn.stack().set_arch(): alternating conv/pool
# entries, then a dense layer ("84") and the 10-way output.
# NOTE(review): entry layout presumably [maps, kernel, stride] for conv and
# [window, stride] for pool — confirm against the deepnodal docs.
arch = [ [6, [5, 5], [1, 1]], [[2, 2], [2, 2]],
         [16, [5, 5], [1, 1]], [[2, 2], [2, 2]],
         [120, [5, 5], [1, 1]], [[2, 2], [2, 2]],
         "84",
         10]
transfn = ['relu'] * (len(arch)-1) + ['softmax']  # ReLU everywhere, softmax output
kernfn = ['xcorr', 'avg'] * 3 + [None, None]  # cross-correlation conv, average pooling; None for dense layers
window = ['valid'] + ['same'] * (len(arch)-1)  # padding mode per layer
weights = 'vsi'  # weight initialisation scheme name (deepnodal-specific)
net_name = 'lenet_mnist'
write_dir = '/tmp/dn_logs/'  # session/log output directory (see call_session below)
def main():
  """Assemble, train and evaluate the LeNet example on MNIST.

  Uses the module-level hyperparameters defined above; session logs are
  written under ``write_dir`` with a UTC timestamp suffix.
  """
  # Input data: load MNIST and split it into its partitions.
  mnist_data = dn.loaders.mnist()
  mnist_data.read_data()
  mnist_data.partition()
  # Architecture: a layer stack configured from the module-level specs.
  layer_stack = dn.stack()
  layer_stack.set_arch(arch)
  layer_stack.set_transfn(transfn)
  layer_stack.set_kernfn(kernfn)
  layer_stack.set_window(window)
  layer_stack.set_weights(weights)
  # Network wrapping the stack, fed with MNIST-shaped inputs.
  model = dn.network(net_name)
  model.set_subnets(layer_stack)
  model.set_inputs(input_dims)
  # Supervisor: two-device hypervisor with a single learning-rate schedule.
  trainer = dn.hypervisor(devs = 2)
  trainer.set_work(model)
  trainer.add_schedule(learning_rate)
  # Train for n_epochs, running the test partition once per epoch.
  timestamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
  start_time = time()
  with trainer.call_session(write_dir + net_name + "_" + timestamp):
    for epoch in range(n_epochs):
      # Consume training batches until the loader signals the epoch's end
      # by returning a falsy value.
      batch = mnist_data.next_batch('train', batch_size)
      while batch:
        trainer.train(*batch)
        batch = mnist_data.next_batch('train', batch_size)
      test_data = mnist_data.next_batch('test')
      summary_str = trainer.test(*test_data)
      print("".join(["Epoch {} ({} s): ", summary_str]).format(str(epoch), str(round(time() - start_time))))
if __name__ == '__main__':
  main()
| 22.12 | 94 | 0.599156 |
import deepnodal as dn
from time import time
import datetime
n_epochs = 20
batch_size = 60
learning_rate = 0.01
input_dims = [28, 28, 1]
arch = [ [6, [5, 5], [1, 1]], [[2, 2], [2, 2]],
[16, [5, 5], [1, 1]], [[2, 2], [2, 2]],
[120, [5, 5], [1, 1]], [[2, 2], [2, 2]],
"84",
10]
transfn = ['relu'] * (len(arch)-1) + ['softmax']
kernfn = ['xcorr', 'avg'] * 3 + [None, None]
window = ['valid'] + ['same'] * (len(arch)-1)
weights = 'vsi'
net_name = 'lenet_mnist'
write_dir = '/tmp/dn_logs/'
def main():
source = dn.loaders.mnist()
source.read_data()
source.partition()
mod = dn.stack()
mod.set_arch(arch)
mod.set_transfn(transfn)
mod.set_kernfn(kernfn)
mod.set_window(window)
mod.set_weights(weights)
net = dn.network(net_name)
net.set_subnets(mod)
net.set_inputs(input_dims)
sup = dn.hypervisor(devs = 2)
sup.set_work(net)
sup.add_schedule(learning_rate)
now = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
t0 = time()
with sup.call_session(write_dir+net_name+"_"+now):
for i in range(n_epochs):
while True:
data = source.next_batch('train', batch_size)
if data:
sup.train(*data)
else:
break
data = source.next_batch('test')
summary_str = sup.test(*data)
print("".join(["Epoch {} ({} s): ", summary_str]).format(str(i), str(round(time()-t0))))
if __name__ == '__main__':
main()
| true | true |
f7fc75c11f972d1d7ae775d687ae7930041738ec | 999 | py | Python | src/log_VF.py | MarkBlanco/userland_dvfs_gov | d26e1e2e24163370d5815fa0bb23e30636b806aa | [
"BSD-3-Clause"
] | null | null | null | src/log_VF.py | MarkBlanco/userland_dvfs_gov | d26e1e2e24163370d5815fa0bb23e30636b806aa | [
"BSD-3-Clause"
] | null | null | null | src/log_VF.py | MarkBlanco/userland_dvfs_gov | d26e1e2e24163370d5815fa0bb23e30636b806aa | [
"BSD-3-Clause"
] | 1 | 2019-01-18T22:50:26.000Z | 2019-01-18T22:50:26.000Z | import time
import sys, os
import sysfs_paths as sysfs
import atexit
import cpu_usage
# Undo the userspace-governor override however the script exits.
atexit.register(cpu_usage.unsetUserSpace)
cpu_usage.setUserSpace()
# Per the variable names, core 4 is the first core of the 'big' cluster and
# core 0 the first core of the 'LITTLE' cluster; frequencies apply per cluster.
big_freqs = cpu_usage.getAvailFreqs(4)
little_freqs = cpu_usage.getAvailFreqs(0)
print(big_freqs)
print(little_freqs)
# Start the sweep with both clusters at their lowest available frequency.
cpu_usage.setClusterFreq(0, little_freqs[0])
cpu_usage.setClusterFreq(4, big_freqs[0])
bi = 0  # index into big_freqs
li = 0  # index into little_freqs
up = True  # sweep direction: True while still stepping frequencies upwards
# Column header for the tab-separated log printed once per iteration below.
print("little_v\tlittle_f\tbig_v\tbig_f")
while True:
    # Sample the currently applied frequency and voltage of each cluster.
    # NOTE(review): resVoltage presumably reads the regulator voltage —
    # confirm in cpu_usage.
    b_freq = cpu_usage.getClusterFreq(4)
    l_freq = cpu_usage.getClusterFreq(0)
    b_vdd = cpu_usage.resVoltage(4)
    l_vdd = cpu_usage.resVoltage(0)
    print("{}\t{}\t{}\t{}".format(l_vdd, l_freq, b_vdd, b_freq))
    # Walk the frequency grid: raise LITTLE to its max, then big to its max,
    # then step back down (LITTLE first), stopping once both are at minimum.
    if up and li != len(little_freqs)-1:
        li += 1
    elif up and bi != len(big_freqs)-1:
        bi += 1
    elif up and bi == len(big_freqs) - 1:
        up = False
        li -= 1
    elif not up and li > 0:
        li -= 1
    elif not up and bi > 0:
        bi -= 1
    else:
        break
    cpu_usage.setClusterFreq(0, little_freqs[li])
    cpu_usage.setClusterFreq(4, big_freqs[bi])
    time.sleep(0.1)
| 22.2 | 61 | 0.718719 | import time
import sys, os
import sysfs_paths as sysfs
import atexit
import cpu_usage
atexit.register(cpu_usage.unsetUserSpace)
cpu_usage.setUserSpace()
big_freqs = cpu_usage.getAvailFreqs(4)
little_freqs = cpu_usage.getAvailFreqs(0)
print(big_freqs)
print(little_freqs)
cpu_usage.setClusterFreq(0, little_freqs[0])
cpu_usage.setClusterFreq(4, big_freqs[0])
bi = 0
li = 0
up = True
print("little_v\tlittle_f\tbig_v\tbig_f")
while True:
b_freq = cpu_usage.getClusterFreq(4)
l_freq = cpu_usage.getClusterFreq(0)
b_vdd = cpu_usage.resVoltage(4)
l_vdd = cpu_usage.resVoltage(0)
print("{}\t{}\t{}\t{}".format(l_vdd, l_freq, b_vdd, b_freq))
if up and li != len(little_freqs)-1:
li += 1
elif up and bi != len(big_freqs)-1:
bi += 1
elif up and bi == len(big_freqs) - 1:
up = False
li -= 1
elif not up and li > 0:
li -= 1
elif not up and bi > 0:
bi -= 1
else:
break
cpu_usage.setClusterFreq(0, little_freqs[li])
cpu_usage.setClusterFreq(4, big_freqs[bi])
time.sleep(0.1)
| true | true |
f7fc7658c41dec1c3c1fca7b6696753387305f37 | 721 | py | Python | core/interfaces/logger.py | microsoft/Guardinel | bc49c4fe051c3e533064c7043e1cee39cfb25d3a | [
"MIT"
] | 2 | 2022-01-26T07:34:44.000Z | 2022-03-29T19:51:45.000Z | core/interfaces/logger.py | microsoft/Guardinel | bc49c4fe051c3e533064c7043e1cee39cfb25d3a | [
"MIT"
] | null | null | null | core/interfaces/logger.py | microsoft/Guardinel | bc49c4fe051c3e533064c7043e1cee39cfb25d3a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
class AbstractLogger(ABC):
    """Base contract that every logger implementation of this tool satisfies.

    Subclasses must provide the four severity-level channels (info, error,
    debug, warn). A per-instance verbosity flag, off by default, is toggled
    through enable_debug() and queried through debug_enabled().
    """

    # Class-level default; enable_debug() shadows it with an instance attribute.
    __debug = False

    def enable_debug(self):
        """Switch debug output on for this logger instance."""
        self.__debug = True

    def debug_enabled(self):
        """Return True once debug output has been enabled on this instance."""
        return self.__debug

    @abstractmethod
    def info(self, tag, message):
        """Log *message* under *tag* at INFO severity."""
        raise NotImplementedError

    @abstractmethod
    def error(self, tag, message):
        """Log *message* under *tag* at ERROR severity."""
        raise NotImplementedError

    @abstractmethod
    def debug(self, tag, message):
        """Log *message* under *tag* at DEBUG severity."""
        raise NotImplementedError

    @abstractmethod
    def warn(self, tag, message):
        """Log *message* under *tag* at WARN severity."""
        raise NotImplementedError
| 20.6 | 71 | 0.674064 |
from abc import ABC, abstractmethod
class AbstractLogger(ABC):
__debug = False
@abstractmethod
def info(self, tag, message):
raise NotImplementedError
@abstractmethod
def error(self, tag, message):
raise NotImplementedError
@abstractmethod
def debug(self, tag, message):
raise NotImplementedError
@abstractmethod
def warn(self, tag, message):
raise NotImplementedError
def enable_debug(self):
self.__debug = True
def debug_enabled(self):
return self.__debug
| true | true |
f7fc767351ee43f61ba2c9cbc4e390d6ba8b5ace | 784,552 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/models/_models_py3.py | jalauzon-msft/azure-sdk-for-python | 15967f5c6d3376f2334a382486ba86339786e028 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/models/_models_py3.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/models/_models_py3.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AdditionalCapabilities(msrest.serialization.Model):
    """Enables or disables a capability on the virtual machine or virtual machine scale set.

    :ivar ultra_ssd_enabled: The flag that enables or disables a capability to have one or more
     managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks
     with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine
     scale set only if this property is enabled.
    :vartype ultra_ssd_enabled: bool
    :ivar hibernation_enabled: The flag that enables or disables hibernation capability on the VM.
    :vartype hibernation_enabled: bool
    """

    # Python attribute -> JSON wire name/type mapping consumed by the msrest serializer.
    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
        'hibernation_enabled': {'key': 'hibernationEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        ultra_ssd_enabled: Optional[bool] = None,
        hibernation_enabled: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword ultra_ssd_enabled: The flag that enables or disables a capability to have one or
         more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed
         disks with storage account type UltraSSD_LRS can be added to a virtual machine or virtual
         machine scale set only if this property is enabled.
        :paramtype ultra_ssd_enabled: bool
        :keyword hibernation_enabled: The flag that enables or disables hibernation capability on
         the VM.
        :paramtype hibernation_enabled: bool
        """
        super(AdditionalCapabilities, self).__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
        self.hibernation_enabled = hibernation_enabled
class AdditionalUnattendContent(msrest.serialization.Model):
    """Specifies additional XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the pass in which the content is applied.

    :ivar pass_name: The pass name. Currently, the only allowable value is OobeSystem. The only
     acceptable values to pass in are None and "OobeSystem". The default value is None.
    :vartype pass_name: str
    :ivar component_name: The component name. Currently, the only allowable value is
     Microsoft-Windows-Shell-Setup. The only acceptable values to pass in are None and
     "Microsoft-Windows-Shell-Setup". The default value is None.
    :vartype component_name: str
    :ivar setting_name: Specifies the name of the setting to which the content applies. Possible
     values are: FirstLogonCommands and AutoLogon. Possible values include: "AutoLogon",
     "FirstLogonCommands".
    :vartype setting_name: str or ~azure.mgmt.compute.v2022_03_01.models.SettingNames
    :ivar content: Specifies the XML formatted content that is added to the unattend.xml file for
     the specified path and component. The XML must be less than 4KB and must include the root
     element for the setting or feature that is being inserted.
    :vartype content: str
    """

    # Wire name/type mapping for msrest (de)serialization; all fields serialize as plain strings.
    _attribute_map = {
        'pass_name': {'key': 'passName', 'type': 'str'},
        'component_name': {'key': 'componentName', 'type': 'str'},
        'setting_name': {'key': 'settingName', 'type': 'str'},
        'content': {'key': 'content', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        pass_name: Optional[str] = None,
        component_name: Optional[str] = None,
        setting_name: Optional[Union[str, "SettingNames"]] = None,
        content: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword pass_name: The pass name. Currently, the only allowable value is OobeSystem. The
         only acceptable values to pass in are None and "OobeSystem". The default value is None.
        :paramtype pass_name: str
        :keyword component_name: The component name. Currently, the only allowable value is
         Microsoft-Windows-Shell-Setup. The only acceptable values to pass in are None and
         "Microsoft-Windows-Shell-Setup". The default value is None.
        :paramtype component_name: str
        :keyword setting_name: Specifies the name of the setting to which the content applies.
         Possible values are: FirstLogonCommands and AutoLogon. Possible values include:
         "AutoLogon", "FirstLogonCommands".
        :paramtype setting_name: str or ~azure.mgmt.compute.v2022_03_01.models.SettingNames
        :keyword content: Specifies the XML formatted content that is added to the unattend.xml
         file for the specified path and component. The XML must be less than 4KB and must include
         the root element for the setting or feature that is being inserted.
        :paramtype content: str
        """
        super(AdditionalUnattendContent, self).__init__(**kwargs)
        self.pass_name = pass_name
        self.component_name = component_name
        self.setting_name = setting_name
        self.content = content
class ApiEntityReference(msrest.serialization.Model):
    """The API entity reference.

    :ivar id: The ARM resource id in the form of
     /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/...
    :vartype id: str
    """

    # Wire name/type mapping for msrest (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword id: The ARM resource id in the form of
         /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/...
        :paramtype id: str
        """
        super(ApiEntityReference, self).__init__(**kwargs)
        # 'id' shadows the builtin; the generated keyword name matches the wire contract.
        self.id = id
class ApiError(msrest.serialization.Model):
    """Api error.

    :ivar details: The Api error details.
    :vartype details: list[~azure.mgmt.compute.v2022_03_01.models.ApiErrorBase]
    :ivar innererror: The Api inner error.
    :vartype innererror: ~azure.mgmt.compute.v2022_03_01.models.InnerError
    :ivar code: The error code.
    :vartype code: str
    :ivar target: The target of the particular error.
    :vartype target: str
    :ivar message: The error message.
    :vartype message: str
    """

    # Wire mapping; 'details' and 'innererror' nest other serialized models.
    _attribute_map = {
        'details': {'key': 'details', 'type': '[ApiErrorBase]'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        details: Optional[List["ApiErrorBase"]] = None,
        innererror: Optional["InnerError"] = None,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword details: The Api error details.
        :paramtype details: list[~azure.mgmt.compute.v2022_03_01.models.ApiErrorBase]
        :keyword innererror: The Api inner error.
        :paramtype innererror: ~azure.mgmt.compute.v2022_03_01.models.InnerError
        :keyword code: The error code.
        :paramtype code: str
        :keyword target: The target of the particular error.
        :paramtype target: str
        :keyword message: The error message.
        :paramtype message: str
        """
        super(ApiError, self).__init__(**kwargs)
        self.details = details
        self.innererror = innererror
        self.code = code
        self.target = target
        self.message = message
class ApiErrorBase(msrest.serialization.Model):
    """Api error base.

    :ivar code: The error code.
    :vartype code: str
    :ivar target: The target of the particular error.
    :vartype target: str
    :ivar message: The error message.
    :vartype message: str
    """

    # Wire name/type mapping for msrest (de)serialization.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword code: The error code.
        :paramtype code: str
        :keyword target: The target of the particular error.
        :paramtype target: str
        :keyword message: The error message.
        :paramtype message: str
        """
        super(ApiErrorBase, self).__init__(**kwargs)
        self.code = code
        self.target = target
        self.message = message
class ApplicationProfile(msrest.serialization.Model):
    """Contains the list of gallery applications that should be made available to the VM/VMSS.

    :ivar gallery_applications: Specifies the gallery applications that should be made available to
     the VM/VMSS.
    :vartype gallery_applications:
     list[~azure.mgmt.compute.v2022_03_01.models.VMGalleryApplication]
    """

    # Wire mapping; the single field serializes as a list of VMGalleryApplication models.
    _attribute_map = {
        'gallery_applications': {'key': 'galleryApplications', 'type': '[VMGalleryApplication]'},
    }

    def __init__(
        self,
        *,
        gallery_applications: Optional[List["VMGalleryApplication"]] = None,
        **kwargs
    ):
        """
        :keyword gallery_applications: Specifies the gallery applications that should be made
         available to the VM/VMSS.
        :paramtype gallery_applications:
         list[~azure.mgmt.compute.v2022_03_01.models.VMGalleryApplication]
        """
        super(ApplicationProfile, self).__init__(**kwargs)
        self.gallery_applications = gallery_applications
class AutomaticOSUpgradePolicy(msrest.serialization.Model):
    """The configuration parameters used for performing automatic OS upgrade.

    :ivar enable_automatic_os_upgrade: Indicates whether OS upgrades should automatically be
     applied to scale set instances in a rolling fashion when a newer version of the OS image
     becomes available. Default value is false. :code:`<br>`:code:`<br>` If this is set to true for
     Windows based scale sets, `enableAutomaticUpdates
     <https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-dotnet>`_
     is automatically set to false and cannot be set to true.
    :vartype enable_automatic_os_upgrade: bool
    :ivar disable_automatic_rollback: Whether OS image rollback feature should be disabled. Default
     value is false.
    :vartype disable_automatic_rollback: bool
    :ivar use_rolling_upgrade_policy: Indicates whether rolling upgrade policy should be used
     during Auto OS Upgrade. Default value is false. Auto OS Upgrade will fallback to the default
     policy if no policy is defined on the VMSS.
    :vartype use_rolling_upgrade_policy: bool
    """

    # Wire name/type mapping for msrest (de)serialization; all three flags are optional booleans.
    _attribute_map = {
        'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'},
        'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'},
        'use_rolling_upgrade_policy': {'key': 'useRollingUpgradePolicy', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        enable_automatic_os_upgrade: Optional[bool] = None,
        disable_automatic_rollback: Optional[bool] = None,
        use_rolling_upgrade_policy: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword enable_automatic_os_upgrade: Indicates whether OS upgrades should automatically be
         applied to scale set instances in a rolling fashion when a newer version of the OS image
         becomes available. Default value is false. :code:`<br>`:code:`<br>` If this is set to true
         for Windows based scale sets, `enableAutomaticUpdates
         <https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-dotnet>`_
         is automatically set to false and cannot be set to true.
        :paramtype enable_automatic_os_upgrade: bool
        :keyword disable_automatic_rollback: Whether OS image rollback feature should be disabled.
         Default value is false.
        :paramtype disable_automatic_rollback: bool
        :keyword use_rolling_upgrade_policy: Indicates whether rolling upgrade policy should be
         used during Auto OS Upgrade. Default value is false. Auto OS Upgrade will fallback to the
         default policy if no policy is defined on the VMSS.
        :paramtype use_rolling_upgrade_policy: bool
        """
        super(AutomaticOSUpgradePolicy, self).__init__(**kwargs)
        self.enable_automatic_os_upgrade = enable_automatic_os_upgrade
        self.disable_automatic_rollback = disable_automatic_rollback
        self.use_rolling_upgrade_policy = use_rolling_upgrade_policy
class AutomaticOSUpgradeProperties(msrest.serialization.Model):
    """Describes automatic OS upgrade properties on the image.

    All required parameters must be populated in order to send to Azure.

    :ivar automatic_os_upgrade_supported: Required. Specifies whether automatic OS upgrade is
     supported on the image.
    :vartype automatic_os_upgrade_supported: bool
    """

    # msrest validation constraints: this field must be supplied before serialization.
    _validation = {
        'automatic_os_upgrade_supported': {'required': True},
    }

    # Wire name/type mapping for msrest (de)serialization.
    _attribute_map = {
        'automatic_os_upgrade_supported': {'key': 'automaticOSUpgradeSupported', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        automatic_os_upgrade_supported: bool,
        **kwargs
    ):
        """
        :keyword automatic_os_upgrade_supported: Required. Specifies whether automatic OS upgrade
         is supported on the image.
        :paramtype automatic_os_upgrade_supported: bool
        """
        super(AutomaticOSUpgradeProperties, self).__init__(**kwargs)
        self.automatic_os_upgrade_supported = automatic_os_upgrade_supported
class AutomaticRepairsPolicy(msrest.serialization.Model):
    """Specifies the configuration parameters for automatic repairs on the virtual machine scale set.

    :ivar enabled: Specifies whether automatic repairs should be enabled on the virtual machine
     scale set. The default value is false.
    :vartype enabled: bool
    :ivar grace_period: The amount of time for which automatic repairs are suspended due to a state
     change on VM. The grace time starts after the state change has completed. This helps avoid
     premature or accidental repairs. The time duration should be specified in ISO 8601 format. The
     minimum allowed grace period is 10 minutes (PT10M), which is also the default value. The
     maximum allowed grace period is 90 minutes (PT90M).
    :vartype grace_period: str
    :ivar repair_action: Type of repair action (replace, restart, reimage) that will be used for
     repairing unhealthy virtual machines in the scale set. Default value is replace. Possible
     values include: "Replace", "Restart", "Reimage".
    :vartype repair_action: str or ~azure.mgmt.compute.v2022_03_01.models.RepairAction
    """

    # Wire mapping; grace_period travels as an ISO 8601 duration string, not a timedelta.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'grace_period': {'key': 'gracePeriod', 'type': 'str'},
        'repair_action': {'key': 'repairAction', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        grace_period: Optional[str] = None,
        repair_action: Optional[Union[str, "RepairAction"]] = None,
        **kwargs
    ):
        """
        :keyword enabled: Specifies whether automatic repairs should be enabled on the virtual
         machine scale set. The default value is false.
        :paramtype enabled: bool
        :keyword grace_period: The amount of time for which automatic repairs are suspended due to
         a state change on VM. The grace time starts after the state change has completed. This
         helps avoid premature or accidental repairs. The time duration should be specified in ISO
         8601 format. The minimum allowed grace period is 10 minutes (PT10M), which is also the
         default value. The maximum allowed grace period is 90 minutes (PT90M).
        :paramtype grace_period: str
        :keyword repair_action: Type of repair action (replace, restart, reimage) that will be used
         for repairing unhealthy virtual machines in the scale set. Default value is replace.
         Possible values include: "Replace", "Restart", "Reimage".
        :paramtype repair_action: str or ~azure.mgmt.compute.v2022_03_01.models.RepairAction
        """
        super(AutomaticRepairsPolicy, self).__init__(**kwargs)
        self.enabled = enabled
        self.grace_period = grace_period
        self.repair_action = repair_action
class Resource(msrest.serialization.Model):
    """The Resource model definition.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    # msrest validation: id/name/type are server-populated (readonly), location is required.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Wire name/type mapping for msrest (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(Resource, self).__init__(**kwargs)
        # Read-only fields (see _validation): left as None until returned by the service.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
class AvailabilitySet(Resource):
    """Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see `Availability sets overview <https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_. :code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Maintenance and updates for Virtual Machines in Azure <https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_ :code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Sku of the availability set, only name is required to be set. See
     AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with
     managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is
     'Classic'.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_update_domain_count: Update Domain count.
    :vartype platform_update_domain_count: int
    :ivar platform_fault_domain_count: Fault Domain count.
    :vartype platform_fault_domain_count: int
    :ivar virtual_machines: A list of references to all virtual machines in the availability set.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar proximity_placement_group: Specifies information about the proximity placement group that
     the availability set should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
     2018-04-01.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    # Validation inherited from Resource plus 'statuses', which is server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'statuses': {'readonly': True},
    }

    # Wire mapping; 'properties.*' keys flatten the ARM properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: Sku of the availability set, only name is required to be set. See
         AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines
         with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value
         is 'Classic'.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword platform_update_domain_count: Update Domain count.
        :paramtype platform_update_domain_count: int
        :keyword platform_fault_domain_count: Fault Domain count.
        :paramtype platform_fault_domain_count: int
        :keyword virtual_machines: A list of references to all virtual machines in the availability
         set.
        :paramtype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword proximity_placement_group: Specifies information about the proximity placement
         group that the availability set should be assigned to. :code:`<br>`:code:`<br>`Minimum
         api-version: 2018-04-01.
        :paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super(AvailabilitySet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only (see _validation): populated by the service, never sent by the client.
        self.statuses = None
class AvailabilitySetListResult(msrest.serialization.Model):
    """Response for the List Availability Set operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of availability sets.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet]
    :ivar next_link: The URI of the next page of AvailabilitySets; pass it to ListNext() to
     retrieve the following page.
    :vartype next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailabilitySet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["AvailabilitySet"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of availability sets.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet]
        :keyword next_link: The URI of the next page of AvailabilitySets; pass it to ListNext()
         to retrieve the following page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class UpdateResource(msrest.serialization.Model):
    """Base model for PATCH-style update requests; carries only the mutable tags.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _attribute_map = {'tags': {'key': 'tags', 'type': '{str}'}}

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        self.tags = tags
class AvailabilitySetUpdate(UpdateResource):
    """Specifies information about the availability set that the virtual machine should be assigned to. Only tags may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Sku of the availability set.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_update_domain_count: Update Domain count.
    :vartype platform_update_domain_count: int
    :ivar platform_fault_domain_count: Fault Domain count.
    :vartype platform_fault_domain_count: int
    :ivar virtual_machines: References to every virtual machine in the availability set.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar proximity_placement_group: The proximity placement group that the availability set
     should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version: 2018-04-01.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar statuses: The resource status information (server-populated).
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _validation = {'statuses': {'readonly': True}}

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: Sku of the availability set.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword platform_update_domain_count: Update Domain count.
        :paramtype platform_update_domain_count: int
        :keyword platform_fault_domain_count: Fault Domain count.
        :paramtype platform_fault_domain_count: int
        :keyword virtual_machines: References to every virtual machine in the availability set.
        :paramtype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword proximity_placement_group: The proximity placement group that the availability
         set should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version: 2018-04-01.
        :paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super().__init__(tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; filled in by the service on responses.
        self.statuses = None
class AvailablePatchSummary(msrest.serialization.Model):
    """Describes the properties of an virtual machine instance view for available patch summary.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: The overall success or failure status of the operation. It remains "InProgress"
     until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded",
     or "CompletedWithWarnings.". Possible values include: "Unknown", "InProgress", "Failed",
     "Succeeded", "CompletedWithWarnings".
    :vartype status: str or ~azure.mgmt.compute.v2022_03_01.models.PatchOperationStatus
    :ivar assessment_activity_id: The activity ID of the operation that produced this result,
     used to correlate across CRP and extension logs.
    :vartype assessment_activity_id: str
    :ivar reboot_pending: The overall reboot status of the VM. True when partially installed
     patches require a reboot to complete installation but the reboot has not yet occurred.
    :vartype reboot_pending: bool
    :ivar critical_and_security_patch_count: The number of critical or security patches detected
     as available and not yet installed.
    :vartype critical_and_security_patch_count: int
    :ivar other_patch_count: The number of all available patches excluding critical and security.
    :vartype other_patch_count: int
    :ivar start_time: The UTC timestamp when the operation began.
    :vartype start_time: ~datetime.datetime
    :ivar last_modified_time: The UTC timestamp when the operation began.
    :vartype last_modified_time: ~datetime.datetime
    :ivar error: The errors that were encountered during execution of the operation. The details
     array contains the list of them.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    _validation = {
        'status': {'readonly': True},
        'assessment_activity_id': {'readonly': True},
        'reboot_pending': {'readonly': True},
        'critical_and_security_patch_count': {'readonly': True},
        'other_patch_count': {'readonly': True},
        'start_time': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'},
        'reboot_pending': {'key': 'rebootPending', 'type': 'bool'},
        'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'},
        'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(self, **kwargs):
        """
        """
        super().__init__(**kwargs)
        # Every attribute is read-only and server-populated; start them all as None.
        self.status = None
        self.assessment_activity_id = None
        self.reboot_pending = None
        self.critical_and_security_patch_count = None
        self.other_patch_count = None
        self.start_time = None
        self.last_modified_time = None
        self.error = None
class BillingProfile(msrest.serialization.Model):
    """Specifies the billing related details of a Azure Spot VM or VMSS. :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.

    :ivar max_price: Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS.
     This price is in US Dollars. :code:`<br>`:code:`<br>` This price will be compared with the
     current Azure Spot price for the VM size. Also, the prices are compared at the time of
     create/update of Azure Spot VM/VMSS and the operation will only succeed if the maxPrice is
     greater than the current Azure Spot price. :code:`<br>`:code:`<br>` The maxPrice will also be
     used for evicting a Azure Spot VM/VMSS if the current Azure Spot price goes beyond the
     maxPrice after creation of VM/VMSS. :code:`<br>`:code:`<br>` Possible values are:
     :code:`<br>`:code:`<br>` - Any decimal value greater than zero. Example: 0.01538
     :code:`<br>`:code:`<br>` -1 – indicates default price to be up-to on-demand.
     :code:`<br>`:code:`<br>` You can set the maxPrice to -1 to indicate that the Azure Spot
     VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is
     not provided by you. :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
    :vartype max_price: float
    """

    _attribute_map = {'max_price': {'key': 'maxPrice', 'type': 'float'}}

    def __init__(self, *, max_price: Optional[float] = None, **kwargs):
        """
        :keyword max_price: The maximum price, in US Dollars, you are willing to pay for the
         Azure Spot VM/VMSS; compared against the current Azure Spot price at create/update time
         and used later for price-based eviction. Any decimal greater than zero (e.g. 0.01538),
         or -1 (the default) to never evict for price reasons.
         :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
        :paramtype max_price: float
        """
        super().__init__(**kwargs)
        self.max_price = max_price
class BootDiagnostics(msrest.serialization.Model):
    """Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. :code:`<br>`:code:`<br>` You can easily view the output of your console log. :code:`<br>`:code:`<br>` Azure also enables you to see a screenshot of the VM from the hypervisor.

    :ivar enabled: Whether boot diagnostics should be enabled on the Virtual Machine.
    :vartype enabled: bool
    :ivar storage_uri: Uri of the storage account used for the console output and screenshot.
     :code:`<br>`:code:`<br>`If storageUri is not specified while enabling boot diagnostics,
     managed storage will be used.
    :vartype storage_uri: str
    """

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'storage_uri': {'key': 'storageUri', 'type': 'str'},
    }

    def __init__(self, *, enabled: Optional[bool] = None, storage_uri: Optional[str] = None, **kwargs):
        """
        :keyword enabled: Whether boot diagnostics should be enabled on the Virtual Machine.
        :paramtype enabled: bool
        :keyword storage_uri: Uri of the storage account used for the console output and
         screenshot. :code:`<br>`:code:`<br>`If storageUri is not specified while enabling boot
         diagnostics, managed storage will be used.
        :paramtype storage_uri: str
        """
        super().__init__(**kwargs)
        self.enabled = enabled
        self.storage_uri = storage_uri
class BootDiagnosticsInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine boot diagnostics.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar console_screenshot_blob_uri: The console screenshot blob URI.
     :code:`<br>`:code:`<br>`NOTE: This will **not** be set if boot diagnostics is currently
     enabled with managed storage.
    :vartype console_screenshot_blob_uri: str
    :ivar serial_console_log_blob_uri: The serial console log blob Uri.
     :code:`<br>`:code:`<br>`NOTE: This will **not** be set if boot diagnostics is currently
     enabled with managed storage.
    :vartype serial_console_log_blob_uri: str
    :ivar status: The boot diagnostics status information for the VM. :code:`<br>`:code:`<br>`
     NOTE: It will be set only if there are errors encountered in enabling boot diagnostics.
    :vartype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    """

    _validation = {
        'console_screenshot_blob_uri': {'readonly': True},
        'serial_console_log_blob_uri': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
        'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, **kwargs):
        """
        """
        super().__init__(**kwargs)
        # All attributes are read-only and populated by the service.
        self.console_screenshot_blob_uri = None
        self.serial_console_log_blob_uri = None
        self.status = None
class CapacityReservation(Resource):
    """Specifies information about the capacity reservation.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Required. SKU of the resource for which capacity needs be reserved. The SKU name
     and capacity is required to be set. Currently VM Skus with the capability called
     'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute
     SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for
     supported values.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar zones: Availability Zone to use for this capacity reservation. The zone has to be
     single value and also should be part for the list of zones specified during the capacity
     reservation group creation. The zone can be assigned only during creation. If not provided,
     the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using
     this capacity reservation to be in same zone.
    :vartype zones: list[str]
    :ivar reservation_id: A unique id generated and assigned to the capacity reservation by the
     platform which does not change throughout the lifetime of the resource.
    :vartype reservation_id: str
    :ivar virtual_machines_associated: A list of all virtual machine resource ids that are
     associated with the capacity reservation.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar provisioning_time: The date time when the capacity reservation was last updated.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The Capacity reservation instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceView
    :ivar time_created: Specifies the time at which the Capacity Reservation resource was
     created.:code:`<br>`:code:`<br>`Minimum api-version: 2022-03-01.
    :vartype time_created: ~datetime.datetime
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'reservation_id': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'reservation_id': {'key': 'properties.reservationId', 'type': 'str'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: Required. SKU of the resource for which capacity needs be reserved. The SKU
         name and capacity is required to be set. Currently VM Skus with the capability called
         'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute
         SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for
         supported values.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword zones: Availability Zone to use for this capacity reservation. The zone has to
         be single value and also should be part for the list of zones specified during the
         capacity reservation group creation. The zone can be assigned only during creation. If
         not provided, the reservation supports only non-zonal deployments. If provided, enforces
         VM/VMSS using this capacity reservation to be in same zone.
        :paramtype zones: list[str]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.zones = zones
        # The remaining attributes are read-only and filled in by the service.
        self.reservation_id = None
        self.virtual_machines_associated = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class CapacityReservationGroup(Resource):
    """Specifies information about the capacity reservation group that the capacity reservations should be assigned to. :code:`<br>`:code:`<br>` Currently, a capacity reservation can only be added to a capacity reservation group at creation time. An existing capacity reservation cannot be added or moved to another capacity reservation group.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar zones: Availability Zones to use for this capacity reservation group. The zones can be
     assigned only during creation. If not provided, the group supports only regional resources
     in the region. If provided, enforces each capacity reservation in the group to be in one of
     the zones.
    :vartype zones: list[str]
    :ivar capacity_reservations: A list of all capacity reservation resource ids that belong to
     capacity reservation group.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar virtual_machines_associated: A list of references to all virtual machines associated
     to the capacity reservation group.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: The capacity reservation group instance view which has the list of
     instance views for all the capacity reservations that belong to the capacity reservation
     group.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupInstanceView
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'capacity_reservations': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'capacity_reservations': {'key': 'properties.capacityReservations', 'type': '[SubResourceReadOnly]'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationGroupInstanceView'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword zones: Availability Zones to use for this capacity reservation group. The zones
         can be assigned only during creation. If not provided, the group supports only regional
         resources in the region. If provided, enforces each capacity reservation in the group to
         be in one of the zones.
        :paramtype zones: list[str]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        # Read-only attributes; populated by the service on responses.
        self.capacity_reservations = None
        self.virtual_machines_associated = None
        self.instance_view = None
class CapacityReservationGroupInstanceView(msrest.serialization.Model):
    """CapacityReservationGroupInstanceView.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar capacity_reservations: List of instance view of the capacity reservations under the
     capacity reservation group.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceViewWithName]
    """

    _validation = {'capacity_reservations': {'readonly': True}}

    _attribute_map = {
        'capacity_reservations': {'key': 'capacityReservations', 'type': '[CapacityReservationInstanceViewWithName]'},
    }

    def __init__(self, **kwargs):
        """
        """
        super().__init__(**kwargs)
        # Server-populated; never sent in requests.
        self.capacity_reservations = None
class CapacityReservationGroupListResult(msrest.serialization.Model):
    """The List capacity reservation group with resource group response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of capacity reservation groups.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup]
    :ivar next_link: The URI of the next page of capacity reservation groups; pass it to
     ListNext() to retrieve the following page.
    :vartype next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CapacityReservationGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["CapacityReservationGroup"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of capacity reservation groups.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup]
        :keyword next_link: The URI of the next page of capacity reservation groups; pass it to
         ListNext() to retrieve the following page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class CapacityReservationGroupUpdate(UpdateResource):
    """Specifies information about the capacity reservation group. Only tags can be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar capacity_reservations: A list of all capacity reservation resource ids that belong to
     capacity reservation group.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar virtual_machines_associated: A list of references to all virtual machines associated
     to the capacity reservation group.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: The capacity reservation group instance view which has the list of
     instance views for all the capacity reservations that belong to the capacity reservation
     group.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupInstanceView
    """

    _validation = {
        'capacity_reservations': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'capacity_reservations': {'key': 'properties.capacityReservations', 'type': '[SubResourceReadOnly]'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationGroupInstanceView'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(tags=tags, **kwargs)
        # Read-only attributes; populated by the service on responses.
        self.capacity_reservations = None
        self.virtual_machines_associated = None
        self.instance_view = None
class CapacityReservationInstanceView(msrest.serialization.Model):
    """The instance view of a capacity reservation that provides as snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.

    :ivar utilization_info: Unutilized capacity of the capacity reservation.
    :vartype utilization_info:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'utilization_info': {'key': 'utilizationInfo', 'type': 'CapacityReservationUtilization'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        utilization_info: Optional["CapacityReservationUtilization"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword utilization_info: Unutilized capacity of the capacity reservation.
        :paramtype utilization_info:
         ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        self.utilization_info = utilization_info
        self.statuses = statuses
class CapacityReservationInstanceViewWithName(CapacityReservationInstanceView):
    """The instance view of a capacity reservation that includes the name of the capacity reservation. It is used for the response to the instance view of a capacity reservation group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar utilization_info: Unutilized capacity of the capacity reservation.
    :vartype utilization_info:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar name: The name of the capacity reservation (server-populated).
    :vartype name: str
    """

    _validation = {'name': {'readonly': True}}

    _attribute_map = {
        'utilization_info': {'key': 'utilizationInfo', 'type': 'CapacityReservationUtilization'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        utilization_info: Optional["CapacityReservationUtilization"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword utilization_info: Unutilized capacity of the capacity reservation.
        :paramtype utilization_info:
         ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(utilization_info=utilization_info, statuses=statuses, **kwargs)
        # Read-only; the service fills in the reservation name on responses.
        self.name = None
class CapacityReservationListResult(msrest.serialization.Model):
    """The list capacity reservation operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of capacity reservations.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservation]
    :ivar next_link: The URI of the next page of capacity reservations; pass it to ListNext()
     to retrieve the following page.
    :vartype next_link: str
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CapacityReservation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["CapacityReservation"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of capacity reservations.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservation]
        :keyword next_link: The URI of the next page of capacity reservations; pass it to
         ListNext() to retrieve the following page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class CapacityReservationProfile(msrest.serialization.Model):
    """Parameters of a capacity reservation profile.

    :ivar capacity_reservation_group: Id of the capacity reservation group to allocate the
     virtual machine or scale-set VM instances from, provided enough capacity has been reserved.
     See https://aka.ms/CapacityReservation for details.
    :vartype capacity_reservation_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """

    _attribute_map = {
        'capacity_reservation_group': {'key': 'capacityReservationGroup', 'type': 'SubResource'},
    }

    def __init__(self, *, capacity_reservation_group: Optional["SubResource"] = None, **kwargs):
        """
        :keyword capacity_reservation_group: Id of the capacity reservation group to allocate the
         virtual machine or scale-set VM instances from, provided enough capacity has been
         reserved. See https://aka.ms/CapacityReservation for details.
        :paramtype capacity_reservation_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super(CapacityReservationProfile, self).__init__(**kwargs)
        self.capacity_reservation_group = capacity_reservation_group
class CapacityReservationUpdate(UpdateResource):
    """Describes an update to a capacity reservation. Only tags and sku.capacity may be changed.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: SKU of the resource for which capacity is reserved. Name and capacity are required
     to be set; only VM SKUs whose 'CapacityReservationSupported' capability is true are
     supported. See List Microsoft.Compute SKUs in a region
     (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar reservation_id: Platform-assigned unique id of the capacity reservation; constant for
     the lifetime of the resource.
    :vartype reservation_id: str
    :ivar virtual_machines_associated: Resource ids of all virtual machines associated with the
     capacity reservation.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar provisioning_time: When the capacity reservation was last updated.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The capacity reservation instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceView
    :ivar time_created: Creation time of the Capacity Reservation resource. Minimum api-version:
     2022-03-01.
    :vartype time_created: ~datetime.datetime
    """

    # All properties below are populated by the service and read-only on the client.
    _validation = {
        name: {'readonly': True}
        for name in (
            'reservation_id',
            'virtual_machines_associated',
            'provisioning_time',
            'provisioning_state',
            'instance_view',
            'time_created',
        )
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'reservation_id': {'key': 'properties.reservationId', 'type': 'str'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, sku: Optional["Sku"] = None, **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: SKU of the resource for which capacity is reserved; name and capacity are
         required to be set. See List Microsoft.Compute SKUs in a region
         (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        """
        super(CapacityReservationUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        # Server-populated properties start out empty on the client side.
        for attr in (
            'reservation_id',
            'virtual_machines_associated',
            'provisioning_time',
            'provisioning_state',
            'instance_view',
            'time_created',
        ):
            setattr(self, attr, None)
class CapacityReservationUtilization(msrest.serialization.Model):
    """Capacity reservation utilization expressed in terms of allocated resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar virtual_machines_allocated: Resource ids of all virtual machines allocated against the
     capacity reservation.
    :vartype virtual_machines_allocated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    """

    _validation = {'virtual_machines_allocated': {'readonly': True}}

    _attribute_map = {
        'virtual_machines_allocated': {'key': 'virtualMachinesAllocated', 'type': '[SubResourceReadOnly]'},
    }

    def __init__(self, **kwargs):
        """ """
        super(CapacityReservationUtilization, self).__init__(**kwargs)
        # Read-only; filled in by the service.
        self.virtual_machines_allocated = None
class ComputeOperationListResult(msrest.serialization.Model):
    """Response body of the List Compute Operation operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The compute operations.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.ComputeOperationValue]
    """

    _validation = {'value': {'readonly': True}}

    _attribute_map = {'value': {'key': 'value', 'type': '[ComputeOperationValue]'}}

    def __init__(self, **kwargs):
        """ """
        super(ComputeOperationListResult, self).__init__(**kwargs)
        # Read-only; filled in by the service.
        self.value = None
class ComputeOperationValue(msrest.serialization.Model):
    """Properties of a single Compute Operation value.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar origin: Origin of the compute operation.
    :vartype origin: str
    :ivar name: Name of the compute operation.
    :vartype name: str
    :ivar operation: Display name of the compute operation.
    :vartype operation: str
    :ivar resource: Display name of the resource the operation applies to.
    :vartype resource: str
    :ivar description: Description of the operation.
    :vartype description: str
    :ivar provider: Resource provider for the operation.
    :vartype provider: str
    """

    # Every property is populated by the service and read-only on the client.
    _validation = {
        name: {'readonly': True}
        for name in ('origin', 'name', 'operation', 'resource', 'description', 'provider')
    }

    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'operation': {'key': 'display.operation', 'type': 'str'},
        'resource': {'key': 'display.resource', 'type': 'str'},
        'description': {'key': 'display.description', 'type': 'str'},
        'provider': {'key': 'display.provider', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """ """
        super(ComputeOperationValue, self).__init__(**kwargs)
        for attr in ('origin', 'name', 'operation', 'resource', 'description', 'provider'):
            setattr(self, attr, None)
class DataDisk(msrest.serialization.Model):
    """Describes a data disk attached to a virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar lun: Required. Logical unit number of the data disk; identifies data disks within the
     VM and therefore must be unique for each data disk attached to a VM.
    :vartype lun: int
    :ivar name: The disk name.
    :vartype name: str
    :ivar vhd: The virtual hard disk.
    :vartype vhd: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar image: The source user image virtual hard disk. It is copied before being attached to
     the virtual machine; if SourceImage is provided, the destination virtual hard drive must not
     exist.
    :vartype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar caching: Caching requirements. Default is None for Standard storage and ReadOnly for
     Premium storage. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Whether writeAccelerator is enabled on the disk.
    :vartype write_accelerator_enabled: bool
    :ivar create_option: Required. How the virtual machine disk should be created. **Attach** is
     used with a specialized disk; **FromImage** is used when creating from an image (with the
     imageReference element for a platform image, or the plan element for a marketplace image).
     Possible values include: "FromImage", "Empty", "Attach".
    :vartype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
    :ivar disk_size_gb: Size of an empty data disk in gigabytes; can override the size of the
     disk in a virtual machine image. Cannot be larger than 1023 GB.
    :vartype disk_size_gb: int
    :ivar managed_disk: The managed disk parameters.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
    :ivar to_be_detached: Whether the data disk is in the process of detachment from the
     VirtualMachine/VirtualMachineScaleset.
    :vartype to_be_detached: bool
    :ivar disk_iops_read_write: Read-write IOPS for the managed disk when StorageAccountType is
     UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks; can be updated only via
     updates to the VirtualMachine Scale Set.
    :vartype disk_iops_read_write: long
    :ivar disk_m_bps_read_write: Bandwidth in MB per second for the managed disk when
     StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks; can
     be updated only via updates to the VirtualMachine Scale Set.
    :vartype disk_m_bps_read_write: long
    :ivar detach_option: Detach behavior for a disk that is already detaching from the virtual
     machine. Supported value: **ForceDetach** (managed data disks only), a last-resort option
     when a previous detach attempt did not complete due to an unexpected VM failure and the disk
     is still not released; writes might not have been flushed. This feature is in preview and is
     not supported for VirtualMachineScaleSet. To force-detach, set toBeDetached to 'true' along
     with detachOption: 'ForceDetach'. Possible values include: "ForceDetach".
    :vartype detach_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDetachOptionTypes
    :ivar delete_option: Whether the data disk is deleted (**Delete**) or retained (**Detach**)
     upon VM deletion. The default value is set to **detach**. Possible values include: "Delete",
     "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """

    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
        'disk_iops_read_write': {'readonly': True},
        'disk_m_bps_read_write': {'readonly': True},
    }

    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'to_be_detached': {'key': 'toBeDetached', 'type': 'bool'},
        'disk_iops_read_write': {'key': 'diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'diskMBpsReadWrite', 'type': 'long'},
        'detach_option': {'key': 'detachOption', 'type': 'str'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        to_be_detached: Optional[bool] = None,
        detach_option: Optional[Union[str, "DiskDetachOptionTypes"]] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """
        :keyword lun: Required. Logical unit number of the data disk; must be unique for each
         data disk attached to a VM.
        :paramtype lun: int
        :keyword name: The disk name.
        :paramtype name: str
        :keyword vhd: The virtual hard disk.
        :paramtype vhd: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword image: The source user image virtual hard disk; copied before attachment, and
         the destination virtual hard drive must not exist.
        :paramtype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword caching: Caching requirements; defaults to None for Standard storage and
         ReadOnly for Premium storage. Possible values include: "None", "ReadOnly", "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword write_accelerator_enabled: Whether writeAccelerator is enabled on the disk.
        :paramtype write_accelerator_enabled: bool
        :keyword create_option: Required. How the disk should be created: "Attach" for a
         specialized disk, "FromImage" when creating from a platform or marketplace image.
         Possible values include: "FromImage", "Empty", "Attach".
        :paramtype create_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
        :keyword disk_size_gb: Size of an empty data disk in gigabytes (max 1023 GB); can
         override the size of the disk in a virtual machine image.
        :paramtype disk_size_gb: int
        :keyword managed_disk: The managed disk parameters.
        :paramtype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
        :keyword to_be_detached: Whether the data disk is in the process of detachment from the
         VirtualMachine/VirtualMachineScaleset.
        :paramtype to_be_detached: bool
        :keyword detach_option: Detach behavior for a disk already detaching from the virtual
         machine. "ForceDetach" (managed data disks only) is a last-resort option for a disk not
         released after a failed detach; writes might not have been flushed. Preview feature; not
         supported for VirtualMachineScaleSet. Set toBeDetached to 'true' along with
         detachOption: 'ForceDetach'. Possible values include: "ForceDetach".
        :paramtype detach_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskDetachOptionTypes
        :keyword delete_option: Whether the data disk is deleted ("Delete") or retained
         ("Detach") upon VM deletion; the default value is set to **detach**. Possible values
         include: "Delete", "Detach".
        :paramtype delete_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
        """
        super(DataDisk, self).__init__(**kwargs)
        # Caller-supplied settings.
        self.lun = lun
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.to_be_detached = to_be_detached
        self.detach_option = detach_option
        self.delete_option = delete_option
        # Server-populated, read-only values.
        self.disk_iops_read_write = None
        self.disk_m_bps_read_write = None
class DataDiskImage(msrest.serialization.Model):
    """Data disk image information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar lun: Logical unit number of the data disk; identifies data disks within the VM and
     therefore must be unique for each data disk attached to a VM.
    :vartype lun: int
    """

    _validation = {'lun': {'readonly': True}}

    _attribute_map = {'lun': {'key': 'lun', 'type': 'int'}}

    def __init__(self, **kwargs):
        """ """
        super(DataDiskImage, self).__init__(**kwargs)
        # Read-only; filled in by the service.
        self.lun = None
class DedicatedHost(Resource):
    """Describes a Dedicated host.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Required. SKU of the dedicated host for Hardware Generation and VM family; only
     name is required to be set. List Microsoft.Compute SKUs for a list of possible values.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_fault_domain: Fault domain of the dedicated host within a dedicated host
     group.
    :vartype platform_fault_domain: int
    :ivar auto_replace_on_failure: Whether the dedicated host is replaced automatically in case
     of a failure; defaults to 'true' when not provided.
    :vartype auto_replace_on_failure: bool
    :ivar host_id: Platform-assigned unique id of the dedicated host; does not change throughout
     the lifetime of the host.
    :vartype host_id: str
    :ivar virtual_machines: References to all virtual machines in the Dedicated Host.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar license_type: Software license type applied to the VMs deployed on the dedicated host.
     Default: **None**. Possible values include: "None", "Windows_Server_Hybrid",
     "Windows_Server_Perpetual".
    :vartype license_type: str or ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostLicenseTypes
    :ivar provisioning_time: The date when the host was first provisioned.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The dedicated host instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceView
    :ivar time_created: Creation time of the Dedicated Host resource. Minimum api-version:
     2022-03-01.
    :vartype time_created: ~datetime.datetime
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'platform_fault_domain': {'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: Required. SKU of the dedicated host for Hardware Generation and VM family;
         only name is required to be set. List Microsoft.Compute SKUs for a list of possible
         values.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword platform_fault_domain: Fault domain of the dedicated host within a dedicated
         host group.
        :paramtype platform_fault_domain: int
        :keyword auto_replace_on_failure: Whether the dedicated host is replaced automatically in
         case of a failure; defaults to 'true' when not provided.
        :paramtype auto_replace_on_failure: bool
        :keyword license_type: Software license type applied to the VMs deployed on the dedicated
         host. Default: **None**. Possible values include: "None", "Windows_Server_Hybrid",
         "Windows_Server_Perpetual".
        :paramtype license_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostLicenseTypes
        """
        super(DedicatedHost, self).__init__(location=location, tags=tags, **kwargs)
        # Caller-supplied settings.
        self.sku = sku
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        self.license_type = license_type
        # Server-populated, read-only properties.
        for attr in (
            'host_id',
            'virtual_machines',
            'provisioning_time',
            'provisioning_state',
            'instance_view',
            'time_created',
        ):
            setattr(self, attr, None)
class DedicatedHostAllocatableVM(msrest.serialization.Model):
    """Unutilized capacity of a dedicated host, expressed for a specific VM size.

    :ivar vm_size: VM size in terms of which the unutilized capacity is represented.
    :vartype vm_size: str
    :ivar count: Maximum number of VMs of size vmSize that fit in the dedicated host's remaining
     capacity.
    :vartype count: float
    """

    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'count': {'key': 'count', 'type': 'float'},
    }

    def __init__(self, *, vm_size: Optional[str] = None, count: Optional[float] = None, **kwargs):
        """
        :keyword vm_size: VM size in terms of which the unutilized capacity is represented.
        :paramtype vm_size: str
        :keyword count: Maximum number of VMs of size vmSize that fit in the dedicated host's
         remaining capacity.
        :paramtype count: float
        """
        super(DedicatedHostAllocatableVM, self).__init__(**kwargs)
        self.vm_size = vm_size
        self.count = count
class DedicatedHostAvailableCapacity(msrest.serialization.Model):
    """Unutilized capacity of a dedicated host.

    :ivar allocatable_v_ms: The dedicated host's unutilized capacity, expressed for each VM size
     allowed to be deployed to it.
    :vartype allocatable_v_ms:
     list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAllocatableVM]
    """

    _attribute_map = {
        'allocatable_v_ms': {'key': 'allocatableVMs', 'type': '[DedicatedHostAllocatableVM]'},
    }

    def __init__(self, *, allocatable_v_ms: Optional[List["DedicatedHostAllocatableVM"]] = None, **kwargs):
        """
        :keyword allocatable_v_ms: The dedicated host's unutilized capacity, expressed for each
         VM size allowed to be deployed to it.
        :paramtype allocatable_v_ms:
         list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAllocatableVM]
        """
        super(DedicatedHostAvailableCapacity, self).__init__(**kwargs)
        self.allocatable_v_ms = allocatable_v_ms
class DedicatedHostGroup(Resource):
    """Describes the dedicated host group that dedicated hosts should be assigned to. Currently, a dedicated host can only be added to a dedicated host group at creation time; an existing dedicated host cannot be added to another dedicated host group.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar zones: Availability Zone to use for this host group; only a single zone is supported,
     and it can be assigned only during creation. If omitted, the group supports all zones in the
     region; if provided, every host in the group must be in the same zone.
    :vartype zones: list[str]
    :ivar platform_fault_domain_count: Number of fault domains that the host group can span.
    :vartype platform_fault_domain_count: int
    :ivar hosts: References to all dedicated hosts in the dedicated host group.
    :vartype hosts: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: The dedicated host group instance view, containing the instance view of
     each dedicated host under the group.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupInstanceView
    :ivar support_automatic_placement: Whether virtual machines or virtual machine scale sets can
     be placed automatically on the dedicated host group (i.e. allocated on Azure-chosen
     dedicated hosts under the group); defaults to 'false' when not provided. Minimum
     api-version: 2020-06-01.
    :vartype support_automatic_placement: bool
    :ivar additional_capabilities: Enables or disables a capability on the dedicated host group.
     Minimum api-version: 2022-03-01.
    :vartype additional_capabilities:
     ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupPropertiesAdditionalCapabilities
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'platform_fault_domain_count': {'minimum': 1},
        'hosts': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostGroupInstanceView'},
        'support_automatic_placement': {'key': 'properties.supportAutomaticPlacement', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'DedicatedHostGroupPropertiesAdditionalCapabilities'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        support_automatic_placement: Optional[bool] = None,
        additional_capabilities: Optional["DedicatedHostGroupPropertiesAdditionalCapabilities"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword zones: Availability Zone to use for this host group; only a single zone is
         supported, and it can be assigned only during creation. If omitted, the group supports
         all zones in the region; if provided, every host in the group must be in the same zone.
        :paramtype zones: list[str]
        :keyword platform_fault_domain_count: Number of fault domains that the host group can
         span.
        :paramtype platform_fault_domain_count: int
        :keyword support_automatic_placement: Whether virtual machines or virtual machine scale
         sets can be placed automatically on the dedicated host group; defaults to 'false' when
         not provided. Minimum api-version: 2020-06-01.
        :paramtype support_automatic_placement: bool
        :keyword additional_capabilities: Enables or disables a capability on the dedicated host
         group. Minimum api-version: 2022-03-01.
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupPropertiesAdditionalCapabilities
        """
        super(DedicatedHostGroup, self).__init__(location=location, tags=tags, **kwargs)
        # Caller-supplied settings.
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        self.support_automatic_placement = support_automatic_placement
        self.additional_capabilities = additional_capabilities
        # Server-populated, read-only properties.
        self.hosts = None
        self.instance_view = None
class DedicatedHostGroupInstanceView(msrest.serialization.Model):
    """Instance view of a dedicated host group.

    :ivar hosts: Instance views of the dedicated hosts under the dedicated host group.
    :vartype hosts: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceViewWithName]
    """

    _attribute_map = {
        'hosts': {'key': 'hosts', 'type': '[DedicatedHostInstanceViewWithName]'},
    }

    def __init__(self, *, hosts: Optional[List["DedicatedHostInstanceViewWithName"]] = None, **kwargs):
        """
        :keyword hosts: Instance views of the dedicated hosts under the dedicated host group.
        :paramtype hosts:
         list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceViewWithName]
        """
        super().__init__(**kwargs)
        self.hosts = hosts
class DedicatedHostGroupListResult(msrest.serialization.Model):
    """The List Dedicated Host Group with resource group response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of dedicated host groups.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroup]
    :ivar next_link: URI of the next page of Dedicated Host Groups; pass it to ListNext() to
     fetch that page.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHostGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["DedicatedHostGroup"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of dedicated host groups.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroup]
        :keyword next_link: URI of the next page of Dedicated Host Groups; pass it to ListNext()
         to fetch that page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostGroupPropertiesAdditionalCapabilities(msrest.serialization.Model):
    """Enables or disables a capability on the dedicated host group. Minimum api-version: 2022-03-01.

    :ivar ultra_ssd_enabled: Enables or disables the ability to run UltraSSD Enabled Virtual
     Machines on the Dedicated Hosts of this group. For a VM to be UltraSSD Enabled, its own
     UltraSSDEnabled flag must also be set to true. Defaults to 'false' when not provided. See
     https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd for details
     on the Ultra SSD feature. NOTE: ultraSSDEnabled can only be enabled for Host Groups created
     as zonal. Minimum api-version: 2022-03-01.
    :vartype ultra_ssd_enabled: bool
    """

    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
    }

    def __init__(self, *, ultra_ssd_enabled: Optional[bool] = None, **kwargs):
        """
        :keyword ultra_ssd_enabled: Enables or disables the ability to run UltraSSD Enabled
         Virtual Machines on the Dedicated Hosts of this group. For a VM to be UltraSSD Enabled,
         its own UltraSSDEnabled flag must also be set to true. Defaults to 'false' when not
         provided. NOTE: ultraSSDEnabled can only be enabled for Host Groups created as zonal.
         Minimum api-version: 2022-03-01.
        :paramtype ultra_ssd_enabled: bool
        """
        super().__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
class DedicatedHostGroupUpdate(UpdateResource):
    """Specifies information about the dedicated host group that the dedicated host should be assigned to. Only tags may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar zones: Availability Zone to use for this host group. Only a single zone is supported
     and it can be assigned only at creation time. When omitted the group supports every zone in
     the region; when set, every host in the group must be in that zone.
    :vartype zones: list[str]
    :ivar platform_fault_domain_count: Number of fault domains that the host group can span.
    :vartype platform_fault_domain_count: int
    :ivar hosts: References to all dedicated hosts in the dedicated host group.
    :vartype hosts: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: The dedicated host group instance view, listing the instance views of
     the dedicated hosts under the group.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupInstanceView
    :ivar support_automatic_placement: Whether virtual machines or virtual machine scale sets
     may be placed automatically on this dedicated host group (Azure picks the host). Defaults
     to 'false' when not provided. Minimum api-version: 2020-06-01.
    :vartype support_automatic_placement: bool
    :ivar additional_capabilities: Enables or disables a capability on the dedicated host group.
     Minimum api-version: 2022-03-01.
    :vartype additional_capabilities:
     ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupPropertiesAdditionalCapabilities
    """

    _validation = {
        'platform_fault_domain_count': {'minimum': 1},
        'hosts': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostGroupInstanceView'},
        'support_automatic_placement': {'key': 'properties.supportAutomaticPlacement', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'DedicatedHostGroupPropertiesAdditionalCapabilities'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 zones: Optional[List[str]] = None,
                 platform_fault_domain_count: Optional[int] = None,
                 support_automatic_placement: Optional[bool] = None,
                 additional_capabilities: Optional["DedicatedHostGroupPropertiesAdditionalCapabilities"] = None,
                 **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword zones: Availability Zone to use for this host group. Only a single zone is
         supported and it can be assigned only at creation time.
        :paramtype zones: list[str]
        :keyword platform_fault_domain_count: Number of fault domains that the host group can span.
        :paramtype platform_fault_domain_count: int
        :keyword support_automatic_placement: Whether VMs or VM scale sets may be placed
         automatically on this dedicated host group. Defaults to 'false' when not provided.
         Minimum api-version: 2020-06-01.
        :paramtype support_automatic_placement: bool
        :keyword additional_capabilities: Enables or disables a capability on the dedicated host
         group. Minimum api-version: 2022-03-01.
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupPropertiesAdditionalCapabilities
        """
        super().__init__(tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # hosts and instance_view are populated by the server (read-only), never by callers.
        self.hosts = None
        self.instance_view = None
        self.support_automatic_placement = support_automatic_placement
        self.additional_capabilities = additional_capabilities
class DedicatedHostInstanceView(msrest.serialization.Model):
    """The instance view of a dedicated host.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar asset_id: Unique id of the dedicated physical machine on which the dedicated host
     resides.
    :vartype asset_id: str
    :ivar available_capacity: Unutilized capacity of the dedicated host.
    :vartype available_capacity:
     ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAvailableCapacity
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _validation = {
        'asset_id': {'readonly': True},
    }

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :keyword available_capacity: Unutilized capacity of the dedicated host.
        :paramtype available_capacity:
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAvailableCapacity
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        # asset_id is server-populated (read-only).
        self.asset_id = None
        self.available_capacity = available_capacity
        self.statuses = statuses
class DedicatedHostInstanceViewWithName(DedicatedHostInstanceView):
    """The instance view of a dedicated host that includes the name of the dedicated host. It is used for the response to the instance view of a dedicated host group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar asset_id: Unique id of the dedicated physical machine on which the dedicated host
     resides.
    :vartype asset_id: str
    :ivar available_capacity: Unutilized capacity of the dedicated host.
    :vartype available_capacity:
     ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAvailableCapacity
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar name: The name of the dedicated host.
    :vartype name: str
    """

    _validation = {
        'asset_id': {'readonly': True},
        'name': {'readonly': True},
    }

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, *, available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :keyword available_capacity: Unutilized capacity of the dedicated host.
        :paramtype available_capacity:
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAvailableCapacity
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(available_capacity=available_capacity, statuses=statuses, **kwargs)
        # name is server-populated (read-only).
        self.name = None
class DedicatedHostListResult(msrest.serialization.Model):
    """The list dedicated host operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of dedicated hosts.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHost]
    :ivar next_link: URI of the next page of dedicated hosts; pass it to ListNext() to fetch
     that page.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHost]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["DedicatedHost"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of dedicated hosts.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHost]
        :keyword next_link: URI of the next page of dedicated hosts; pass it to ListNext() to
         fetch that page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostUpdate(UpdateResource):
    """Specifies information about the dedicated host. Only tags, autoReplaceOnFailure and licenseType may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar platform_fault_domain: Fault domain of the dedicated host within a dedicated host group.
    :vartype platform_fault_domain: int
    :ivar auto_replace_on_failure: Whether the dedicated host should be replaced automatically
     in case of a failure. Defaults to 'true' when not provided.
    :vartype auto_replace_on_failure: bool
    :ivar host_id: A unique id generated and assigned to the dedicated host by the platform; it
     does not change throughout the lifetime of the host.
    :vartype host_id: str
    :ivar virtual_machines: References to all virtual machines in the Dedicated Host.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar license_type: Software license type applied to the VMs deployed on the dedicated host.
     Possible values are: **None**, **Windows_Server_Hybrid**, **Windows_Server_Perpetual**.
     Default: **None**. Possible values include: "None", "Windows_Server_Hybrid",
     "Windows_Server_Perpetual".
    :vartype license_type: str or ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostLicenseTypes
    :ivar provisioning_time: The date when the host was first provisioned.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The dedicated host instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceView
    :ivar time_created: Time at which the Dedicated Host resource was created. Minimum
     api-version: 2022-03-01.
    :vartype time_created: ~datetime.datetime
    """

    _validation = {
        'platform_fault_domain': {'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 platform_fault_domain: Optional[int] = None,
                 auto_replace_on_failure: Optional[bool] = None,
                 license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
                 **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword platform_fault_domain: Fault domain of the dedicated host within a dedicated
         host group.
        :paramtype platform_fault_domain: int
        :keyword auto_replace_on_failure: Whether the dedicated host should be replaced
         automatically in case of a failure. Defaults to 'true' when not provided.
        :paramtype auto_replace_on_failure: bool
        :keyword license_type: Software license type applied to the VMs deployed on the
         dedicated host. Possible values are: **None**, **Windows_Server_Hybrid**,
         **Windows_Server_Perpetual**. Default: **None**. Possible values include: "None",
         "Windows_Server_Hybrid", "Windows_Server_Perpetual".
        :paramtype license_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostLicenseTypes
        """
        super().__init__(tags=tags, **kwargs)
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        self.license_type = license_type
        # Everything below is populated by the server (read-only), never by callers.
        self.host_id = None
        self.virtual_machines = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class DiagnosticsProfile(msrest.serialization.Model):
    """Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.

    :ivar boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
     Console Output and a Screenshot to diagnose VM status. NOTE: if storageUri is specified,
     ensure that the storage account is in the same region and subscription as the VM.
    :vartype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnostics
    """

    _attribute_map = {
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnostics'},
    }

    def __init__(self, *, boot_diagnostics: Optional["BootDiagnostics"] = None, **kwargs):
        """
        :keyword boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to
         view Console Output and a Screenshot to diagnose VM status. NOTE: if storageUri is
         specified, ensure that the storage account is in the same region and subscription as
         the VM.
        :paramtype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnostics
        """
        super().__init__(**kwargs)
        self.boot_diagnostics = boot_diagnostics
class DiffDiskSettings(msrest.serialization.Model):
    """Describes the parameters of ephemeral disk settings that can be specified for operating system disk. NOTE: The ephemeral disk settings can only be specified for managed disk.

    :ivar option: Ephemeral disk settings for the operating system disk. Possible values
     include: "Local".
    :vartype option: str or ~azure.mgmt.compute.v2022_03_01.models.DiffDiskOptions
    :ivar placement: Ephemeral disk placement for the operating system disk. Possible values
     are: **CacheDisk**, **ResourceDisk**. Default: **CacheDisk** if one is configured for the
     VM size, otherwise **ResourceDisk**. Refer to the VM size documentation for Windows VMs at
     https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VMs at
     https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes
     expose a cache disk. Possible values include: "CacheDisk", "ResourceDisk".
    :vartype placement: str or ~azure.mgmt.compute.v2022_03_01.models.DiffDiskPlacement
    """

    _attribute_map = {
        'option': {'key': 'option', 'type': 'str'},
        'placement': {'key': 'placement', 'type': 'str'},
    }

    def __init__(self, *, option: Optional[Union[str, "DiffDiskOptions"]] = None,
                 placement: Optional[Union[str, "DiffDiskPlacement"]] = None, **kwargs):
        """
        :keyword option: Ephemeral disk settings for the operating system disk. Possible values
         include: "Local".
        :paramtype option: str or ~azure.mgmt.compute.v2022_03_01.models.DiffDiskOptions
        :keyword placement: Ephemeral disk placement for the operating system disk. Possible
         values are: **CacheDisk**, **ResourceDisk**. Default: **CacheDisk** if one is
         configured for the VM size, otherwise **ResourceDisk**. Possible values include:
         "CacheDisk", "ResourceDisk".
        :paramtype placement: str or ~azure.mgmt.compute.v2022_03_01.models.DiffDiskPlacement
        """
        super().__init__(**kwargs)
        self.option = option
        self.placement = placement
class DisallowedConfiguration(msrest.serialization.Model):
    """Specifies the disallowed configuration for a virtual machine image.

    :ivar vm_disk_type: VM disk types which are disallowed. Possible values include: "None",
     "Unmanaged".
    :vartype vm_disk_type: str or ~azure.mgmt.compute.v2022_03_01.models.VmDiskTypes
    """

    _attribute_map = {
        'vm_disk_type': {'key': 'vmDiskType', 'type': 'str'},
    }

    def __init__(self, *, vm_disk_type: Optional[Union[str, "VmDiskTypes"]] = None, **kwargs):
        """
        :keyword vm_disk_type: VM disk types which are disallowed. Possible values include:
         "None", "Unmanaged".
        :paramtype vm_disk_type: str or ~azure.mgmt.compute.v2022_03_01.models.VmDiskTypes
        """
        super().__init__(**kwargs)
        self.vm_disk_type = vm_disk_type
class SubResource(msrest.serialization.Model):
    """SubResource.

    :ivar id: Resource Id.
    :vartype id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    # The parameter is named ``id`` (shadowing the builtin) to match the REST wire format.
    def __init__(self, *, id: Optional[str] = None, **kwargs):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        """
        super().__init__(**kwargs)
        self.id = id
class DiskEncryptionSetParameters(SubResource):
    """Describes the parameter of customer managed disk encryption set resource id that can be specified for disk. NOTE: The disk encryption set resource id can only be specified for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details.

    :ivar id: Resource Id.
    :vartype id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        """
        # Identical to SubResource; exists as a distinct type for the REST API surface.
        super().__init__(id=id, **kwargs)
class DiskEncryptionSettings(msrest.serialization.Model):
    """Describes a Encryption Settings for a Disk.

    :ivar disk_encryption_key: Location of the disk encryption key, which is a Key Vault Secret.
    :vartype disk_encryption_key: ~azure.mgmt.compute.v2022_03_01.models.KeyVaultSecretReference
    :ivar key_encryption_key: Location of the key encryption key in Key Vault.
    :vartype key_encryption_key: ~azure.mgmt.compute.v2022_03_01.models.KeyVaultKeyReference
    :ivar enabled: Whether disk encryption should be enabled on the virtual machine.
    :vartype enabled: bool
    """

    _attribute_map = {
        'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultSecretReference'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultKeyReference'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(self, *, disk_encryption_key: Optional["KeyVaultSecretReference"] = None,
                 key_encryption_key: Optional["KeyVaultKeyReference"] = None,
                 enabled: Optional[bool] = None, **kwargs):
        """
        :keyword disk_encryption_key: Location of the disk encryption key, which is a Key Vault
         Secret.
        :paramtype disk_encryption_key:
         ~azure.mgmt.compute.v2022_03_01.models.KeyVaultSecretReference
        :keyword key_encryption_key: Location of the key encryption key in Key Vault.
        :paramtype key_encryption_key: ~azure.mgmt.compute.v2022_03_01.models.KeyVaultKeyReference
        :keyword enabled: Whether disk encryption should be enabled on the virtual machine.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
        self.enabled = enabled
class DiskInstanceView(msrest.serialization.Model):
    """The instance view of the disk.

    :ivar name: The disk name.
    :vartype name: str
    :ivar encryption_settings: Encryption settings for the OS Disk. Minimum api-version:
     2015-06-15.
    :vartype encryption_settings:
     list[~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings]
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': '[DiskEncryptionSettings]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, name: Optional[str] = None,
                 encryption_settings: Optional[List["DiskEncryptionSettings"]] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :keyword name: The disk name.
        :paramtype name: str
        :keyword encryption_settings: Encryption settings for the OS Disk. Minimum api-version:
         2015-06-15.
        :paramtype encryption_settings:
         list[~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings]
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        self.name = name
        self.encryption_settings = encryption_settings
        self.statuses = statuses
class DiskRestorePointInstanceView(msrest.serialization.Model):
    """The instance view of a disk restore point.

    :ivar id: Disk restore point Id.
    :vartype id: str
    :ivar replication_status: The disk restore point replication status information.
    :vartype replication_status:
     ~azure.mgmt.compute.v2022_03_01.models.DiskRestorePointReplicationStatus
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'replication_status': {'key': 'replicationStatus', 'type': 'DiskRestorePointReplicationStatus'},
    }

    def __init__(self, *, id: Optional[str] = None,
                 replication_status: Optional["DiskRestorePointReplicationStatus"] = None,
                 **kwargs):
        """
        :keyword id: Disk restore point Id.
        :paramtype id: str
        :keyword replication_status: The disk restore point replication status information.
        :paramtype replication_status:
         ~azure.mgmt.compute.v2022_03_01.models.DiskRestorePointReplicationStatus
        """
        super().__init__(**kwargs)
        self.id = id
        self.replication_status = replication_status
class DiskRestorePointReplicationStatus(msrest.serialization.Model):
    """The instance view of a disk restore point.

    :ivar status: The resource status information.
    :vartype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    :ivar completion_percent: Replication completion percentage.
    :vartype completion_percent: int
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
        'completion_percent': {'key': 'completionPercent', 'type': 'int'},
    }

    def __init__(self, *, status: Optional["InstanceViewStatus"] = None,
                 completion_percent: Optional[int] = None, **kwargs):
        """
        :keyword status: The resource status information.
        :paramtype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
        :keyword completion_percent: Replication completion percentage.
        :paramtype completion_percent: int
        """
        super().__init__(**kwargs)
        self.status = status
        self.completion_percent = completion_percent
class ExtendedLocation(msrest.serialization.Model):
    """The complex type of the extended location.

    :ivar name: The name of the extended location.
    :vartype name: str
    :ivar type: The type of the extended location. Possible values include: "EdgeZone".
    :vartype type: str or ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocationTypes
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # The parameter is named ``type`` (shadowing the builtin) to match the REST wire format.
    def __init__(self, *, name: Optional[str] = None,
                 type: Optional[Union[str, "ExtendedLocationTypes"]] = None, **kwargs):
        """
        :keyword name: The name of the extended location.
        :paramtype name: str
        :keyword type: The type of the extended location. Possible values include: "EdgeZone".
        :paramtype type: str or ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocationTypes
        """
        super().__init__(**kwargs)
        self.name = name
        self.type = type
class HardwareProfile(msrest.serialization.Model):
"""Specifies the hardware settings for the virtual machine.
:ivar vm_size: Specifies the size of the virtual machine. :code:`<br>`:code:`<br>` The enum
data type is currently deprecated and will be removed by December 23rd 2023.
:code:`<br>`:code:`<br>` Recommended way to get the list of available sizes is using these
APIs: :code:`<br>`:code:`<br>` `List all available virtual machine sizes in an availability set
<https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes>`_
:code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
<https://docs.microsoft.com/rest/api/compute/resourceskus/list>`_ :code:`<br>`:code:`<br>`
`List all available virtual machine sizes for resizing
<https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes>`_. For more
information about virtual machine sizes, see `Sizes for virtual machines
<https://docs.microsoft.com/azure/virtual-machines/sizes>`_. :code:`<br>`:code:`<br>` The
available VM sizes depend on region and availability set. Possible values include: "Basic_A0",
"Basic_A1", "Basic_A2", "Basic_A3", "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2",
"Standard_A3", "Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8",
"Standard_A9", "Standard_A10", "Standard_A11", "Standard_A1_v2", "Standard_A2_v2",
"Standard_A4_v2", "Standard_A8_v2", "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2",
"Standard_B1s", "Standard_B1ms", "Standard_B2s", "Standard_B2ms", "Standard_B4ms",
"Standard_B8ms", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11",
"Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2", "Standard_D2_v2",
"Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3",
"Standard_D8_v3", "Standard_D16_v3", "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3",
"Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3",
"Standard_D64s_v3", "Standard_D11_v2", "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2",
"Standard_D15_v2", "Standard_DS1", "Standard_DS2", "Standard_DS3", "Standard_DS4",
"Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14", "Standard_DS1_v2",
"Standard_DS2_v2", "Standard_DS3_v2", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2",
"Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2",
"Standard_DS13-4_v2", "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2",
"Standard_E2_v3", "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3",
"Standard_E64_v3", "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3",
"Standard_E32s_v3", "Standard_E64s_v3", "Standard_E32-16_v3", "Standard_E32-8s_v3",
"Standard_E64-32s_v3", "Standard_E64-16s_v3", "Standard_F1", "Standard_F2", "Standard_F4",
"Standard_F8", "Standard_F16", "Standard_F1s", "Standard_F2s", "Standard_F4s", "Standard_F8s",
"Standard_F16s", "Standard_F2s_v2", "Standard_F4s_v2", "Standard_F8s_v2", "Standard_F16s_v2",
"Standard_F32s_v2", "Standard_F64s_v2", "Standard_F72s_v2", "Standard_G1", "Standard_G2",
"Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2", "Standard_GS3",
"Standard_GS4", "Standard_GS5", "Standard_GS4-8", "Standard_GS4-4", "Standard_GS5-16",
"Standard_GS5-8", "Standard_H8", "Standard_H16", "Standard_H8m", "Standard_H16m",
"Standard_H16r", "Standard_H16mr", "Standard_L4s", "Standard_L8s", "Standard_L16s",
"Standard_L32s", "Standard_M64s", "Standard_M64ms", "Standard_M128s", "Standard_M128ms",
"Standard_M64-32ms", "Standard_M64-16ms", "Standard_M128-64ms", "Standard_M128-32ms",
"Standard_NC6", "Standard_NC12", "Standard_NC24", "Standard_NC24r", "Standard_NC6s_v2",
"Standard_NC12s_v2", "Standard_NC24s_v2", "Standard_NC24rs_v2", "Standard_NC6s_v3",
"Standard_NC12s_v3", "Standard_NC24s_v3", "Standard_NC24rs_v3", "Standard_ND6s",
"Standard_ND12s", "Standard_ND24s", "Standard_ND24rs", "Standard_NV6", "Standard_NV12",
"Standard_NV24".
:vartype vm_size: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSizeTypes
:ivar vm_size_properties: Specifies the properties for customizing the size of the virtual
machine. Minimum api-version: 2021-07-01. :code:`<br>`:code:`<br>` This feature is still in
preview mode and is not supported for VirtualMachineScaleSet. :code:`<br>`:code:`<br>` Please
follow the instructions in `VM Customization <https://aka.ms/vmcustomization>`_ for more
details.
:vartype vm_size_properties: ~azure.mgmt.compute.v2022_03_01.models.VMSizeProperties
"""
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'vm_size_properties': {'key': 'vmSizeProperties', 'type': 'VMSizeProperties'},
}
def __init__(
self,
*,
vm_size: Optional[Union[str, "VirtualMachineSizeTypes"]] = None,
vm_size_properties: Optional["VMSizeProperties"] = None,
**kwargs
):
"""
:keyword vm_size: Specifies the size of the virtual machine. :code:`<br>`:code:`<br>` The enum
data type is currently deprecated and will be removed by December 23rd 2023.
:code:`<br>`:code:`<br>` Recommended way to get the list of available sizes is using these
APIs: :code:`<br>`:code:`<br>` `List all available virtual machine sizes in an availability set
<https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes>`_
:code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
<https://docs.microsoft.com/rest/api/compute/resourceskus/list>`_ :code:`<br>`:code:`<br>`
`List all available virtual machine sizes for resizing
<https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes>`_. For more
information about virtual machine sizes, see `Sizes for virtual machines
<https://docs.microsoft.com/azure/virtual-machines/sizes>`_. :code:`<br>`:code:`<br>` The
available VM sizes depend on region and availability set. Possible values include: "Basic_A0",
"Basic_A1", "Basic_A2", "Basic_A3", "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2",
"Standard_A3", "Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8",
"Standard_A9", "Standard_A10", "Standard_A11", "Standard_A1_v2", "Standard_A2_v2",
"Standard_A4_v2", "Standard_A8_v2", "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2",
"Standard_B1s", "Standard_B1ms", "Standard_B2s", "Standard_B2ms", "Standard_B4ms",
"Standard_B8ms", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11",
"Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2", "Standard_D2_v2",
"Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3",
"Standard_D8_v3", "Standard_D16_v3", "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3",
"Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3",
"Standard_D64s_v3", "Standard_D11_v2", "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2",
"Standard_D15_v2", "Standard_DS1", "Standard_DS2", "Standard_DS3", "Standard_DS4",
"Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14", "Standard_DS1_v2",
"Standard_DS2_v2", "Standard_DS3_v2", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2",
"Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2",
"Standard_DS13-4_v2", "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2",
"Standard_E2_v3", "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3",
"Standard_E64_v3", "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3",
"Standard_E32s_v3", "Standard_E64s_v3", "Standard_E32-16_v3", "Standard_E32-8s_v3",
"Standard_E64-32s_v3", "Standard_E64-16s_v3", "Standard_F1", "Standard_F2", "Standard_F4",
"Standard_F8", "Standard_F16", "Standard_F1s", "Standard_F2s", "Standard_F4s", "Standard_F8s",
"Standard_F16s", "Standard_F2s_v2", "Standard_F4s_v2", "Standard_F8s_v2", "Standard_F16s_v2",
"Standard_F32s_v2", "Standard_F64s_v2", "Standard_F72s_v2", "Standard_G1", "Standard_G2",
"Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2", "Standard_GS3",
"Standard_GS4", "Standard_GS5", "Standard_GS4-8", "Standard_GS4-4", "Standard_GS5-16",
"Standard_GS5-8", "Standard_H8", "Standard_H16", "Standard_H8m", "Standard_H16m",
"Standard_H16r", "Standard_H16mr", "Standard_L4s", "Standard_L8s", "Standard_L16s",
"Standard_L32s", "Standard_M64s", "Standard_M64ms", "Standard_M128s", "Standard_M128ms",
"Standard_M64-32ms", "Standard_M64-16ms", "Standard_M128-64ms", "Standard_M128-32ms",
"Standard_NC6", "Standard_NC12", "Standard_NC24", "Standard_NC24r", "Standard_NC6s_v2",
"Standard_NC12s_v2", "Standard_NC24s_v2", "Standard_NC24rs_v2", "Standard_NC6s_v3",
"Standard_NC12s_v3", "Standard_NC24s_v3", "Standard_NC24rs_v3", "Standard_ND6s",
"Standard_ND12s", "Standard_ND24s", "Standard_ND24rs", "Standard_NV6", "Standard_NV12",
"Standard_NV24".
:paramtype vm_size: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSizeTypes
:keyword vm_size_properties: Specifies the properties for customizing the size of the virtual
machine. Minimum api-version: 2021-07-01. :code:`<br>`:code:`<br>` This feature is still in
preview mode and is not supported for VirtualMachineScaleSet. :code:`<br>`:code:`<br>` Please
follow the instructions in `VM Customization <https://aka.ms/vmcustomization>`_ for more
details.
:paramtype vm_size_properties: ~azure.mgmt.compute.v2022_03_01.models.VMSizeProperties
"""
super(HardwareProfile, self).__init__(**kwargs)
self.vm_size = vm_size
self.vm_size_properties = vm_size_properties
class Image(Resource):
    """A user image from which virtual machines can be created.

    The source user image virtual hard disk is copied before being attached to
    the virtual machine. If SourceImage is provided, the destination virtual
    hard drive must not exist.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar extended_location: The extended location of the Image.
    :vartype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
    :ivar source_virtual_machine: The source virtual machine from which Image is created.
    :vartype source_virtual_machine: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar storage_profile: Specifies the storage settings for the virtual machine disks.
    :vartype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.ImageStorageProfile
    :ivar provisioning_state: The provisioning state (read-only).
    :vartype provisioning_state: str
    :ivar hyper_v_generation: The HyperVGenerationType of the VirtualMachine
     created from the image ("V1" or "V2"). From API Version 2019-03-01, the
     user must specify the value when the image source is a blob; for managed
     sources (disk or snapshot) it may be required when it cannot be deduced.
    :vartype hyper_v_generation: str or
     ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
    """

    # Fields the server populates (readonly) or requires on requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        """Create an Image resource.

        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword extended_location: The extended location of the Image.
        :paramtype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
        :keyword source_virtual_machine: The source virtual machine from which Image is created.
        :paramtype source_virtual_machine: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword storage_profile: Specifies the storage settings for the virtual machine disks.
        :paramtype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.ImageStorageProfile
        :keyword hyper_v_generation: The HyperVGenerationType ("V1" or "V2") of
         the VirtualMachine created from the image; see the class docstring for
         when it must be supplied.
        :paramtype hyper_v_generation: str or
         ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.extended_location = extended_location
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Read-only: populated by the service on responses, never sent.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class ImageDisk(msrest.serialization.Model):
    """Describes an image disk (common base for OS and data disks).

    :ivar snapshot: The snapshot.
    :vartype snapshot: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar managed_disk: The managedDisk.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar blob_uri: The Virtual Hard Disk.
    :vartype blob_uri: str
    :ivar caching: Specifies the caching requirements. Possible values:
     "None", "ReadOnly", "ReadWrite". Default: None for Standard storage,
     ReadOnly for Premium storage.
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar disk_size_gb: Specifies the size of empty data disks in gigabytes;
     can be used to overwrite the name of the disk in a virtual machine image.
     This value cannot be larger than 1023 GB.
    :vartype disk_size_gb: int
    :ivar storage_account_type: Specifies the storage account type for the
     managed disk ("Standard_LRS", "Premium_LRS", "StandardSSD_LRS",
     "UltraSSD_LRS", "Premium_ZRS", "StandardSSD_ZRS", "PremiumV2_LRS").
     NOTE: UltraSSD_LRS can only be used with data disks, never the OS disk.
    :vartype storage_account_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.StorageAccountTypes
    :ivar disk_encryption_set: Specifies the customer managed disk encryption
     set resource id for the managed image disk.
    :vartype disk_encryption_set:
     ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
    """

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
    }

    def __init__(
        self,
        *,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        """Create an image disk description.

        Every keyword has the same meaning as the class-level :ivar:
        documentation; all are optional.
        """
        super().__init__(**kwargs)
        self.snapshot = snapshot
        self.managed_disk = managed_disk
        self.blob_uri = blob_uri
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.storage_account_type = storage_account_type
        self.disk_encryption_set = disk_encryption_set
class ImageDataDisk(ImageDisk):
    """Describes a data disk of an image.

    All required parameters must be populated in order to send to Azure.

    Inherits every disk field from :class:`ImageDisk` (snapshot, managed_disk,
    blob_uri, caching, disk_size_gb, storage_account_type,
    disk_encryption_set) and adds the mandatory logical unit number.

    :ivar lun: Required. Specifies the logical unit number of the data disk.
     This value is used to identify data disks within the VM and therefore
     must be unique for each data disk attached to a VM.
    :vartype lun: int
    """

    _validation = {
        'lun': {'required': True},
    }

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'lun': {'key': 'lun', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        lun: int,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        """Create a data disk description.

        :keyword lun: Required. Logical unit number of the data disk; must be
         unique for each data disk attached to a VM.
        :paramtype lun: int

        The remaining keywords are forwarded unchanged to
        :class:`ImageDisk` and carry the meanings documented there.
        """
        super().__init__(
            snapshot=snapshot,
            managed_disk=managed_disk,
            blob_uri=blob_uri,
            caching=caching,
            disk_size_gb=disk_size_gb,
            storage_account_type=storage_account_type,
            disk_encryption_set=disk_encryption_set,
            **kwargs
        )
        self.lun = lun
class ImageListResult(msrest.serialization.Model):
    """The List Image operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of Images.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.Image]
    :ivar next_link: The uri of the next page of Images; call ListNext() with
     it to fetch that page.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Image]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["Image"], next_link: Optional[str] = None, **kwargs):
        """Create a list-of-images page.

        :keyword value: Required. The list of Images.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.Image]
        :keyword next_link: The uri of the next page of Images, if any.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ImageOSDisk(ImageDisk):
    """Describes an Operating System disk of an image.

    All required parameters must be populated in order to send to Azure.

    Inherits every disk field from :class:`ImageDisk` (snapshot, managed_disk,
    blob_uri, caching, disk_size_gb, storage_account_type,
    disk_encryption_set) and adds the mandatory OS type and state.

    :ivar os_type: Required. This property allows you to specify the type of
     the OS that is included in the disk if creating a VM from a custom
     image. Possible values include: "Windows", "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    :ivar os_state: Required. The OS State. Possible values include:
     "Generalized", "Specialized".
    :vartype os_state: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemStateTypes
    """

    _validation = {
        'os_type': {'required': True},
        'os_state': {'required': True},
    }

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'os_state': {'key': 'osState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        os_type: Union[str, "OperatingSystemTypes"],
        os_state: Union[str, "OperatingSystemStateTypes"],
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        """Create an OS disk description.

        :keyword os_type: Required. Type of the OS included in the disk when
         creating a VM from a custom image ("Windows" or "Linux").
        :paramtype os_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        :keyword os_state: Required. The OS State ("Generalized" or
         "Specialized").
        :paramtype os_state: str or
         ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemStateTypes

        The remaining keywords are forwarded unchanged to
        :class:`ImageDisk` and carry the meanings documented there.
        """
        super().__init__(
            snapshot=snapshot,
            managed_disk=managed_disk,
            blob_uri=blob_uri,
            caching=caching,
            disk_size_gb=disk_size_gb,
            storage_account_type=storage_account_type,
            disk_encryption_set=disk_encryption_set,
            **kwargs
        )
        self.os_type = os_type
        self.os_state = os_state
class ImageReference(SubResource):
    """Specifies information about the image to use.

    You can specify information about platform images, marketplace images, or
    virtual machine images. This element is required when you want to use a
    platform image, marketplace image, or virtual machine image, but is not
    used in other creation operations. NOTE: Image reference publisher and
    offer can only be set when you create the scale set.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar publisher: The image publisher.
    :vartype publisher: str
    :ivar offer: Specifies the offer of the platform image or marketplace
     image used to create the virtual machine.
    :vartype offer: str
    :ivar sku: The image SKU.
    :vartype sku: str
    :ivar version: Specifies the version of the platform image or marketplace
     image used to create the virtual machine. Allowed formats are
     Major.Minor.Build or 'latest'. Specify 'latest' to use the latest version
     available at deploy time; the VM image will not automatically update
     after deploy time even if a new version becomes available. Do not use
     'version' for gallery image deployment: gallery images should always use
     the 'id' field (set the gallery image resource id without version input
     to get the latest gallery image version).
    :vartype version: str
    :ivar exact_version: Specifies in decimal numbers the version of the
     platform image or marketplace image used to create the virtual machine.
     Read-only; differs from 'version' only when 'version' is 'latest'.
    :vartype exact_version: str
    :ivar shared_gallery_image_id: Specified the shared gallery image unique
     id for vm deployment. This can be fetched from shared gallery image GET
     call.
    :vartype shared_gallery_image_id: str
    :ivar community_gallery_image_id: Specified the community gallery image
     unique id for vm deployment. This can be fetched from community gallery
     image GET call.
    :vartype community_gallery_image_id: str
    """

    _validation = {
        'exact_version': {'readonly': True},
    }

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'exact_version': {'key': 'exactVersion', 'type': 'str'},
        'shared_gallery_image_id': {'key': 'sharedGalleryImageId', 'type': 'str'},
        'community_gallery_image_id': {'key': 'communityGalleryImageId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # noqa: A002 - keyword name fixed by the REST contract
        publisher: Optional[str] = None,
        offer: Optional[str] = None,
        sku: Optional[str] = None,
        version: Optional[str] = None,
        shared_gallery_image_id: Optional[str] = None,
        community_gallery_image_id: Optional[str] = None,
        **kwargs
    ):
        """Create an image reference.

        Every keyword has the same meaning as the class-level :ivar:
        documentation; all are optional. ``exact_version`` is server-populated
        and cannot be supplied here.
        """
        super().__init__(id=id, **kwargs)
        self.publisher = publisher
        self.offer = offer
        self.sku = sku
        self.version = version
        # Read-only: populated by the service on responses, never sent.
        self.exact_version = None
        self.shared_gallery_image_id = shared_gallery_image_id
        self.community_gallery_image_id = community_gallery_image_id
class ImageStorageProfile(msrest.serialization.Model):
    """Describes a storage profile.

    :ivar os_disk: Specifies information about the operating system disk used
     by the virtual machine. For more information about disks, see `About
     disks and VHDs for Azure virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
    :vartype os_disk: ~azure.mgmt.compute.v2022_03_01.models.ImageOSDisk
    :ivar data_disks: Specifies the parameters that are used to add a data
     disk to a virtual machine (same reference as above for background).
    :vartype data_disks: list[~azure.mgmt.compute.v2022_03_01.models.ImageDataDisk]
    :ivar zone_resilient: Specifies whether an image is zone resilient or not.
     Default is false. Zone resilient images can be created only in regions
     that provide Zone Redundant Storage (ZRS).
    :vartype zone_resilient: bool
    """

    # Python attribute -> REST JSON key/type map used for (de)serialization.
    _attribute_map = {
        'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
        'zone_resilient': {'key': 'zoneResilient', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        os_disk: Optional["ImageOSDisk"] = None,
        data_disks: Optional[List["ImageDataDisk"]] = None,
        zone_resilient: Optional[bool] = None,
        **kwargs
    ):
        """Create a storage profile.

        :keyword os_disk: The operating system disk used by the virtual machine.
        :paramtype os_disk: ~azure.mgmt.compute.v2022_03_01.models.ImageOSDisk
        :keyword data_disks: Data disks to add to the virtual machine.
        :paramtype data_disks: list[~azure.mgmt.compute.v2022_03_01.models.ImageDataDisk]
        :keyword zone_resilient: Whether the image is zone resilient
         (default false); only available in regions providing ZRS.
        :paramtype zone_resilient: bool
        """
        super().__init__(**kwargs)
        self.os_disk = os_disk
        self.data_disks = data_disks
        self.zone_resilient = zone_resilient
class ImageUpdate(UpdateResource):
    """Update payload for a user image virtual hard disk; only tags may be updated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar source_virtual_machine: The source virtual machine the Image was created from.
    :vartype source_virtual_machine: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar storage_profile: Storage settings for the virtual machine disks.
    :vartype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.ImageStorageProfile
    :ivar provisioning_state: The provisioning state (read-only).
    :vartype provisioning_state: str
    :ivar hyper_v_generation: The HyperVGenerationType of the VirtualMachine created from
     the image. From API Version 2019-03-01, if the image source is a blob the user must
     specify the value; if the source is a managed resource such as a disk or snapshot,
     the property may be required when it cannot be deduced from the source. Possible
     values include: "V1", "V2".
    :vartype hyper_v_generation: str or
     ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword source_virtual_machine: The source virtual machine the Image was created
         from.
        :paramtype source_virtual_machine: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword storage_profile: Storage settings for the virtual machine disks.
        :paramtype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.ImageStorageProfile
        :keyword hyper_v_generation: The HyperVGenerationType of the VirtualMachine created
         from the image. From API Version 2019-03-01, if the image source is a blob the user
         must specify the value; if the source is a managed resource such as a disk or
         snapshot, the property may be required when it cannot be deduced from the source.
         Possible values include: "V1", "V2".
        :paramtype hyper_v_generation: str or
         ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
        """
        super().__init__(tags=tags, **kwargs)
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Server-populated; always None on the client side.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class InnerError(msrest.serialization.Model):
    """Details of a nested (inner) error.

    :ivar exceptiontype: The exception type.
    :vartype exceptiontype: str
    :ivar errordetail: The internal error message or exception dump.
    :vartype errordetail: str
    """

    _attribute_map = {
        'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
        'errordetail': {'key': 'errordetail', 'type': 'str'},
    }

    def __init__(self, *, exceptiontype: Optional[str] = None, errordetail: Optional[str] = None, **kwargs):
        """
        :keyword exceptiontype: The exception type.
        :paramtype exceptiontype: str
        :keyword errordetail: The internal error message or exception dump.
        :paramtype errordetail: str
        """
        super().__init__(**kwargs)
        self.exceptiontype = exceptiontype
        self.errordetail = errordetail
class InstanceViewStatus(msrest.serialization.Model):
    """A single status entry of an instance view.

    :ivar code: The status code.
    :vartype code: str
    :ivar level: The level code. Possible values include: "Info", "Warning", "Error".
    :vartype level: str or ~azure.mgmt.compute.v2022_03_01.models.StatusLevelTypes
    :ivar display_status: The short, localizable label for the status.
    :vartype display_status: str
    :ivar message: The detailed status message, including alerts and error messages.
    :vartype message: str
    :ivar time: The time of the status.
    :vartype time: ~datetime.datetime
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'level': {'key': 'level', 'type': 'str'},
        'display_status': {'key': 'displayStatus', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        level: Optional[Union[str, "StatusLevelTypes"]] = None,
        display_status: Optional[str] = None,
        message: Optional[str] = None,
        time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword code: The status code.
        :paramtype code: str
        :keyword level: The level code. Possible values include: "Info", "Warning", "Error".
        :paramtype level: str or ~azure.mgmt.compute.v2022_03_01.models.StatusLevelTypes
        :keyword display_status: The short, localizable label for the status.
        :paramtype display_status: str
        :keyword message: The detailed status message, including alerts and error messages.
        :paramtype message: str
        :keyword time: The time of the status.
        :paramtype time: ~datetime.datetime
        """
        super().__init__(**kwargs)
        self.code = code
        self.level = level
        self.display_status = display_status
        self.message = message
        self.time = time
class KeyVaultKeyReference(msrest.serialization.Model):
    """A reference to a key encryption key stored in Key Vault.

    All required parameters must be populated in order to send to Azure.

    :ivar key_url: Required. The URL referencing a key encryption key in Key Vault.
    :vartype key_url: str
    :ivar source_vault: Required. The relative URL of the Key Vault containing the key.
    :vartype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """

    _validation = {
        'key_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'key_url': {'key': 'keyUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(self, *, key_url: str, source_vault: "SubResource", **kwargs):
        """
        :keyword key_url: Required. The URL referencing a key encryption key in Key Vault.
        :paramtype key_url: str
        :keyword source_vault: Required. The relative URL of the Key Vault containing the
         key.
        :paramtype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super().__init__(**kwargs)
        self.key_url = key_url
        self.source_vault = source_vault
class KeyVaultSecretReference(msrest.serialization.Model):
    """A reference to a secret stored in Key Vault.

    All required parameters must be populated in order to send to Azure.

    :ivar secret_url: Required. The URL referencing a secret in a Key Vault.
    :vartype secret_url: str
    :ivar source_vault: Required. The relative URL of the Key Vault containing the secret.
    :vartype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """

    _validation = {
        'secret_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(self, *, secret_url: str, source_vault: "SubResource", **kwargs):
        """
        :keyword secret_url: Required. The URL referencing a secret in a Key Vault.
        :paramtype secret_url: str
        :keyword source_vault: Required. The relative URL of the Key Vault containing the
         secret.
        :paramtype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super().__init__(**kwargs)
        self.secret_url = secret_url
        self.source_vault = source_vault
class LastPatchInstallationSummary(msrest.serialization.Model):
    """Summary of the most recent patch installation on the machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: The overall success or failure status of the operation. It remains
     "InProgress" until the operation completes, at which point it becomes "Unknown",
     "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include:
     "Unknown", "InProgress", "Failed", "Succeeded", "CompletedWithWarnings".
    :vartype status: str or ~azure.mgmt.compute.v2022_03_01.models.PatchOperationStatus
    :ivar installation_activity_id: The activity ID of the operation that produced this
     result; used to correlate across CRP and extension logs.
    :vartype installation_activity_id: str
    :ivar maintenance_window_exceeded: Whether the operation ran out of time before it
     completed all of its intended actions.
    :vartype maintenance_window_exceeded: bool
    :ivar not_selected_patch_count: The number of available patches not installed because
     they did not match a classification or inclusion list entry.
    :vartype not_selected_patch_count: int
    :ivar excluded_patch_count: The number of available patches excluded explicitly by a
     customer-specified exclusion list match.
    :vartype excluded_patch_count: int
    :ivar pending_patch_count: The number of available patches expected to be installed
     over the course of the patch installation operation.
    :vartype pending_patch_count: int
    :ivar installed_patch_count: The count of patches that installed successfully.
    :vartype installed_patch_count: int
    :ivar failed_patch_count: The count of patches that failed installation.
    :vartype failed_patch_count: int
    :ivar start_time: The UTC timestamp when the operation began.
    :vartype start_time: ~datetime.datetime
    :ivar last_modified_time: The UTC timestamp when the operation began.
    :vartype last_modified_time: ~datetime.datetime
    :ivar error: The errors encountered during execution of the operation; the details
     array contains the list of them.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    _validation = {
        'status': {'readonly': True},
        'installation_activity_id': {'readonly': True},
        'maintenance_window_exceeded': {'readonly': True},
        'not_selected_patch_count': {'readonly': True},
        'excluded_patch_count': {'readonly': True},
        'pending_patch_count': {'readonly': True},
        'installed_patch_count': {'readonly': True},
        'failed_patch_count': {'readonly': True},
        'start_time': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'installation_activity_id': {'key': 'installationActivityId', 'type': 'str'},
        'maintenance_window_exceeded': {'key': 'maintenanceWindowExceeded', 'type': 'bool'},
        'not_selected_patch_count': {'key': 'notSelectedPatchCount', 'type': 'int'},
        'excluded_patch_count': {'key': 'excludedPatchCount', 'type': 'int'},
        'pending_patch_count': {'key': 'pendingPatchCount', 'type': 'int'},
        'installed_patch_count': {'key': 'installedPatchCount', 'type': 'int'},
        'failed_patch_count': {'key': 'failedPatchCount', 'type': 'int'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(self, **kwargs):
        """All attributes are populated by the server; nothing is settable client-side."""
        super().__init__(**kwargs)
        # Initialize every read-only attribute to None in declaration order.
        for attr in (
            'status', 'installation_activity_id', 'maintenance_window_exceeded',
            'not_selected_patch_count', 'excluded_patch_count', 'pending_patch_count',
            'installed_patch_count', 'failed_patch_count', 'start_time',
            'last_modified_time', 'error',
        ):
            setattr(self, attr, None)
class LinuxConfiguration(msrest.serialization.Model):
    """Linux operating system settings on the virtual machine. For a list of supported Linux distributions, see `Linux on Azure-Endorsed Distributions <https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros>`_.

    :ivar disable_password_authentication: Whether password authentication should be
     disabled.
    :vartype disable_password_authentication: bool
    :ivar ssh: The ssh key configuration for a Linux OS.
    :vartype ssh: ~azure.mgmt.compute.v2022_03_01.models.SshConfiguration
    :ivar provision_vm_agent: Whether the virtual machine agent should be provisioned on
     the virtual machine. When this property is not specified in the request body the
     default behavior is to set it to true, ensuring the VM Agent is installed on the VM
     so that extensions can be added later.
    :vartype provision_vm_agent: bool
    :ivar patch_settings: [Preview Feature] Settings related to VM Guest Patching on
     Linux.
    :vartype patch_settings: ~azure.mgmt.compute.v2022_03_01.models.LinuxPatchSettings
    """

    _attribute_map = {
        'disable_password_authentication': {'key': 'disablePasswordAuthentication', 'type': 'bool'},
        'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'patch_settings': {'key': 'patchSettings', 'type': 'LinuxPatchSettings'},
    }

    def __init__(
        self,
        *,
        disable_password_authentication: Optional[bool] = None,
        ssh: Optional["SshConfiguration"] = None,
        provision_vm_agent: Optional[bool] = None,
        patch_settings: Optional["LinuxPatchSettings"] = None,
        **kwargs
    ):
        """
        :keyword disable_password_authentication: Whether password authentication should be
         disabled.
        :paramtype disable_password_authentication: bool
        :keyword ssh: The ssh key configuration for a Linux OS.
        :paramtype ssh: ~azure.mgmt.compute.v2022_03_01.models.SshConfiguration
        :keyword provision_vm_agent: Whether the virtual machine agent should be provisioned
         on the virtual machine. When this property is not specified in the request body the
         default behavior is to set it to true, ensuring the VM Agent is installed on the VM
         so that extensions can be added later.
        :paramtype provision_vm_agent: bool
        :keyword patch_settings: [Preview Feature] Settings related to VM Guest Patching on
         Linux.
        :paramtype patch_settings: ~azure.mgmt.compute.v2022_03_01.models.LinuxPatchSettings
        """
        super().__init__(**kwargs)
        self.disable_password_authentication = disable_password_authentication
        self.ssh = ssh
        self.provision_vm_agent = provision_vm_agent
        self.patch_settings = patch_settings
class LinuxParameters(msrest.serialization.Model):
    """Input for InstallPatches on a Linux VM, as directly received by the API.

    :ivar classifications_to_include: The update classifications to select when installing
     patches for Linux.
    :vartype classifications_to_include: list[str or
     ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchClassificationLinux]
    :ivar package_name_masks_to_include: Packages to include in the patch operation.
     Format: packageName_packageVersion.
    :vartype package_name_masks_to_include: list[str]
    :ivar package_name_masks_to_exclude: Packages to exclude in the patch operation.
     Format: packageName_packageVersion.
    :vartype package_name_masks_to_exclude: list[str]
    :ivar maintenance_run_id: Maintenance run identifier for Auto VM Guest Patching in
     Linux.
    :vartype maintenance_run_id: str
    """

    _attribute_map = {
        'classifications_to_include': {'key': 'classificationsToInclude', 'type': '[str]'},
        'package_name_masks_to_include': {'key': 'packageNameMasksToInclude', 'type': '[str]'},
        'package_name_masks_to_exclude': {'key': 'packageNameMasksToExclude', 'type': '[str]'},
        'maintenance_run_id': {'key': 'maintenanceRunId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        classifications_to_include: Optional[List[Union[str, "VMGuestPatchClassificationLinux"]]] = None,
        package_name_masks_to_include: Optional[List[str]] = None,
        package_name_masks_to_exclude: Optional[List[str]] = None,
        maintenance_run_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword classifications_to_include: The update classifications to select when
         installing patches for Linux.
        :paramtype classifications_to_include: list[str or
         ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchClassificationLinux]
        :keyword package_name_masks_to_include: Packages to include in the patch operation.
         Format: packageName_packageVersion.
        :paramtype package_name_masks_to_include: list[str]
        :keyword package_name_masks_to_exclude: Packages to exclude in the patch operation.
         Format: packageName_packageVersion.
        :paramtype package_name_masks_to_exclude: list[str]
        :keyword maintenance_run_id: Maintenance run identifier for Auto VM Guest Patching
         in Linux.
        :paramtype maintenance_run_id: str
        """
        super().__init__(**kwargs)
        self.classifications_to_include = classifications_to_include
        self.package_name_masks_to_include = package_name_masks_to_include
        self.package_name_masks_to_exclude = package_name_masks_to_exclude
        self.maintenance_run_id = maintenance_run_id
class LinuxPatchSettings(msrest.serialization.Model):
    """Settings related to VM Guest Patching on Linux.

    :ivar patch_mode: The mode of VM Guest Patching for an IaaS virtual machine, or for
     virtual machines associated with a virtual machine scale set whose OrchestrationMode
     is Flexible. Possible values are: **ImageDefault** - the virtual machine's default
     patching configuration is used; **AutomaticByPlatform** - the virtual machine is
     automatically updated by the platform (the property provisionVMAgent must be true).
     Possible values include: "ImageDefault", "AutomaticByPlatform".
    :vartype patch_mode: str or ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchMode
    :ivar assessment_mode: The mode of VM Guest Patch Assessment for the IaaS virtual
     machine. Possible values are: **ImageDefault** - you control the timing of patch
     assessments on the virtual machine; **AutomaticByPlatform** - the platform triggers
     periodic patch assessments (the property provisionVMAgent must be true). Possible
     values include: "ImageDefault", "AutomaticByPlatform".
    :vartype assessment_mode: str or
     ~azure.mgmt.compute.v2022_03_01.models.LinuxPatchAssessmentMode
    :ivar automatic_by_platform_settings: Additional settings for patch mode
     AutomaticByPlatform in VM Guest Patching on Linux.
    :vartype automatic_by_platform_settings:
     ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchAutomaticByPlatformSettings
    """

    _attribute_map = {
        'patch_mode': {'key': 'patchMode', 'type': 'str'},
        'assessment_mode': {'key': 'assessmentMode', 'type': 'str'},
        'automatic_by_platform_settings': {'key': 'automaticByPlatformSettings', 'type': 'LinuxVMGuestPatchAutomaticByPlatformSettings'},
    }

    def __init__(
        self,
        *,
        patch_mode: Optional[Union[str, "LinuxVMGuestPatchMode"]] = None,
        assessment_mode: Optional[Union[str, "LinuxPatchAssessmentMode"]] = None,
        automatic_by_platform_settings: Optional["LinuxVMGuestPatchAutomaticByPlatformSettings"] = None,
        **kwargs
    ):
        """
        :keyword patch_mode: The mode of VM Guest Patching for an IaaS virtual machine, or
         for virtual machines associated with a virtual machine scale set whose
         OrchestrationMode is Flexible. Possible values are: **ImageDefault** - the virtual
         machine's default patching configuration is used; **AutomaticByPlatform** - the
         virtual machine is automatically updated by the platform (the property
         provisionVMAgent must be true). Possible values include: "ImageDefault",
         "AutomaticByPlatform".
        :paramtype patch_mode: str or ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchMode
        :keyword assessment_mode: The mode of VM Guest Patch Assessment for the IaaS virtual
         machine. Possible values are: **ImageDefault** - you control the timing of patch
         assessments on the virtual machine; **AutomaticByPlatform** - the platform triggers
         periodic patch assessments (the property provisionVMAgent must be true). Possible
         values include: "ImageDefault", "AutomaticByPlatform".
        :paramtype assessment_mode: str or
         ~azure.mgmt.compute.v2022_03_01.models.LinuxPatchAssessmentMode
        :keyword automatic_by_platform_settings: Additional settings for patch mode
         AutomaticByPlatform in VM Guest Patching on Linux.
        :paramtype automatic_by_platform_settings:
         ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchAutomaticByPlatformSettings
        """
        super().__init__(**kwargs)
        self.patch_mode = patch_mode
        self.assessment_mode = assessment_mode
        self.automatic_by_platform_settings = automatic_by_platform_settings
class LinuxVMGuestPatchAutomaticByPlatformSettings(msrest.serialization.Model):
    """Additional settings applied when patch mode AutomaticByPlatform is selected in Linux patch settings.

    :ivar reboot_setting: The reboot setting for all AutomaticByPlatform patch installation
     operations. Possible values include: "Unknown", "IfRequired", "Never", "Always".
    :vartype reboot_setting: str or
     ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchAutomaticByPlatformRebootSetting
    """

    _attribute_map = {
        'reboot_setting': {'key': 'rebootSetting', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        reboot_setting: Optional[Union[str, "LinuxVMGuestPatchAutomaticByPlatformRebootSetting"]] = None,
        **kwargs
    ):
        """
        :keyword reboot_setting: The reboot setting for all AutomaticByPlatform patch
         installation operations. Possible values include: "Unknown", "IfRequired", "Never",
         "Always".
        :paramtype reboot_setting: str or
         ~azure.mgmt.compute.v2022_03_01.models.LinuxVMGuestPatchAutomaticByPlatformRebootSetting
        """
        super().__init__(**kwargs)
        self.reboot_setting = reboot_setting
class ListUsagesResult(msrest.serialization.Model):
    """Response of the List Usages operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of compute resource usages.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.Usage]
    :ivar next_link: The URI to fetch the next page of compute resource usage information;
     call ListNext() with this URI to fetch the next page.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["Usage"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of compute resource usages.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.Usage]
        :keyword next_link: The URI to fetch the next page of compute resource usage
         information; call ListNext() with this URI to fetch the next page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class LogAnalyticsInputBase(msrest.serialization.Model):
    """Base input class for the LogAnalytics Api.

    All required parameters must be populated in order to send to Azure.

    :ivar blob_container_sas_uri: Required. SAS Uri of the logging blob container to which
     the LogAnalytics Api writes output logs.
    :vartype blob_container_sas_uri: str
    :ivar from_time: Required. From time of the query.
    :vartype from_time: ~datetime.datetime
    :ivar to_time: Required. To time of the query.
    :vartype to_time: ~datetime.datetime
    :ivar group_by_throttle_policy: Group query result by the Throttle Policy applied.
    :vartype group_by_throttle_policy: bool
    :ivar group_by_operation_name: Group query result by Operation Name.
    :vartype group_by_operation_name: bool
    :ivar group_by_resource_name: Group query result by Resource Name.
    :vartype group_by_resource_name: bool
    :ivar group_by_client_application_id: Group query result by Client Application ID.
    :vartype group_by_client_application_id: bool
    :ivar group_by_user_agent: Group query result by User Agent.
    :vartype group_by_user_agent: bool
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'group_by_client_application_id': {'key': 'groupByClientApplicationId', 'type': 'bool'},
        'group_by_user_agent': {'key': 'groupByUserAgent', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword blob_container_sas_uri: Required. SAS Uri of the logging blob container to
         which the LogAnalytics Api writes output logs.
        :paramtype blob_container_sas_uri: str
        :keyword from_time: Required. From time of the query.
        :paramtype from_time: ~datetime.datetime
        :keyword to_time: Required. To time of the query.
        :paramtype to_time: ~datetime.datetime
        :keyword group_by_throttle_policy: Group query result by the Throttle Policy
         applied.
        :paramtype group_by_throttle_policy: bool
        :keyword group_by_operation_name: Group query result by Operation Name.
        :paramtype group_by_operation_name: bool
        :keyword group_by_resource_name: Group query result by Resource Name.
        :paramtype group_by_resource_name: bool
        :keyword group_by_client_application_id: Group query result by Client Application
         ID.
        :paramtype group_by_client_application_id: bool
        :keyword group_by_user_agent: Group query result by User Agent.
        :paramtype group_by_user_agent: bool
        """
        super().__init__(**kwargs)
        self.blob_container_sas_uri = blob_container_sas_uri
        self.from_time = from_time
        self.to_time = to_time
        self.group_by_throttle_policy = group_by_throttle_policy
        self.group_by_operation_name = group_by_operation_name
        self.group_by_resource_name = group_by_resource_name
        self.group_by_client_application_id = group_by_client_application_id
        self.group_by_user_agent = group_by_user_agent
class LogAnalyticsOperationResult(msrest.serialization.Model):
    """Status response of a LogAnalytics operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar properties: LogAnalyticsOutput.
    :vartype properties: ~azure.mgmt.compute.v2022_03_01.models.LogAnalyticsOutput
    """

    _validation = {
        'properties': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'LogAnalyticsOutput'},
    }

    def __init__(self, **kwargs):
        """The single attribute is server-populated; nothing is settable client-side."""
        super().__init__(**kwargs)
        self.properties = None
class LogAnalyticsOutput(msrest.serialization.Model):
    """Output properties of a LogAnalytics operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar output: Output file Uri path to the blob container.
    :vartype output: str
    """

    _validation = {
        'output': {'readonly': True},
    }

    _attribute_map = {
        'output': {'key': 'output', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """The single attribute is server-populated; nothing is settable client-side."""
        super().__init__(**kwargs)
        self.output = None
class MaintenanceRedeployStatus(msrest.serialization.Model):
    """Status of a maintenance operation.

    :ivar is_customer_initiated_maintenance_allowed: True if the customer is allowed to
     perform maintenance.
    :vartype is_customer_initiated_maintenance_allowed: bool
    :ivar pre_maintenance_window_start_time: Start time of the Pre Maintenance Window.
    :vartype pre_maintenance_window_start_time: ~datetime.datetime
    :ivar pre_maintenance_window_end_time: End time of the Pre Maintenance Window.
    :vartype pre_maintenance_window_end_time: ~datetime.datetime
    :ivar maintenance_window_start_time: Start time of the Maintenance Window.
    :vartype maintenance_window_start_time: ~datetime.datetime
    :ivar maintenance_window_end_time: End time of the Maintenance Window.
    :vartype maintenance_window_end_time: ~datetime.datetime
    :ivar last_operation_result_code: Result code of the last maintenance operation.
     Possible values include: "None", "RetryLater", "MaintenanceAborted",
     "MaintenanceCompleted".
    :vartype last_operation_result_code: str or
     ~azure.mgmt.compute.v2022_03_01.models.MaintenanceOperationResultCodeTypes
    :ivar last_operation_message: Message returned for the last maintenance operation.
    :vartype last_operation_message: str
    """

    _attribute_map = {
        'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
        'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
        'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
        'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
        'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
        'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'str'},
        'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        is_customer_initiated_maintenance_allowed: Optional[bool] = None,
        pre_maintenance_window_start_time: Optional[datetime.datetime] = None,
        pre_maintenance_window_end_time: Optional[datetime.datetime] = None,
        maintenance_window_start_time: Optional[datetime.datetime] = None,
        maintenance_window_end_time: Optional[datetime.datetime] = None,
        last_operation_result_code: Optional[Union[str, "MaintenanceOperationResultCodeTypes"]] = None,
        last_operation_message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword is_customer_initiated_maintenance_allowed: True if the customer is allowed
         to perform maintenance.
        :paramtype is_customer_initiated_maintenance_allowed: bool
        :keyword pre_maintenance_window_start_time: Start time of the Pre Maintenance
         Window.
        :paramtype pre_maintenance_window_start_time: ~datetime.datetime
        :keyword pre_maintenance_window_end_time: End time of the Pre Maintenance Window.
        :paramtype pre_maintenance_window_end_time: ~datetime.datetime
        :keyword maintenance_window_start_time: Start time of the Maintenance Window.
        :paramtype maintenance_window_start_time: ~datetime.datetime
        :keyword maintenance_window_end_time: End time of the Maintenance Window.
        :paramtype maintenance_window_end_time: ~datetime.datetime
        :keyword last_operation_result_code: Result code of the last maintenance operation.
         Possible values include: "None", "RetryLater", "MaintenanceAborted",
         "MaintenanceCompleted".
        :paramtype last_operation_result_code: str or
         ~azure.mgmt.compute.v2022_03_01.models.MaintenanceOperationResultCodeTypes
        :keyword last_operation_message: Message returned for the last maintenance
         operation.
        :paramtype last_operation_message: str
        """
        super().__init__(**kwargs)
        self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
        self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
        self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
        self.maintenance_window_start_time = maintenance_window_start_time
        self.maintenance_window_end_time = maintenance_window_end_time
        self.last_operation_result_code = last_operation_result_code
        self.last_operation_message = last_operation_message
class ManagedDiskParameters(SubResource):
    """The parameters of a managed disk.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar storage_account_type: Storage account type for the managed disk. NOTE: UltraSSD_LRS can
     only be used with data disks, never with the OS disk. Possible values include: "Standard_LRS",
     "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS", "Premium_ZRS", "StandardSSD_ZRS",
     "PremiumV2_LRS".
    :vartype storage_account_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.StorageAccountTypes
    :ivar disk_encryption_set: Customer managed disk encryption set resource id for the managed
     disk.
    :vartype disk_encryption_set:
     ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
    :ivar security_profile: Security profile for the managed disk.
    :vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.VMDiskSecurityProfile
    """

    # Wire-format mapping consumed by msrest (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'security_profile': {'key': 'securityProfile', 'type': 'VMDiskSecurityProfile'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        security_profile: Optional["VMDiskSecurityProfile"] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword storage_account_type: Storage account type for the managed disk. NOTE:
         UltraSSD_LRS can only be used with data disks, never with the OS disk. Possible values
         include: "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS", "Premium_ZRS",
         "StandardSSD_ZRS", "PremiumV2_LRS".
        :paramtype storage_account_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.StorageAccountTypes
        :keyword disk_encryption_set: Customer managed disk encryption set resource id for the
         managed disk.
        :paramtype disk_encryption_set:
         ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
        :keyword security_profile: Security profile for the managed disk.
        :paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.VMDiskSecurityProfile
        """
        # ``id`` is owned by the SubResource base; the rest are stored here.
        super().__init__(id=id, **kwargs)
        self.security_profile = security_profile
        self.disk_encryption_set = disk_encryption_set
        self.storage_account_type = storage_account_type
class NetworkInterfaceReference(SubResource):
    """Describes a network interface reference.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar primary: Whether this is the primary network interface when the virtual machine has more
     than one network interface.
    :vartype primary: bool
    :ivar delete_option: What happens to the network interface when the VM is deleted. Possible
     values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    """

    # msrest (de)serialization mapping; primary/delete_option nest under 'properties' on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword primary: Whether this is the primary network interface when the virtual machine
         has more than one network interface.
        :paramtype primary: bool
        :keyword delete_option: What happens to the network interface when the VM is deleted.
         Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        """
        # ``id`` is handled by the SubResource base class.
        super().__init__(id=id, **kwargs)
        self.delete_option = delete_option
        self.primary = primary
class NetworkProfile(msrest.serialization.Model):
    """Specifies the network interfaces or the networking configuration of the virtual machine.

    :ivar network_interfaces: Resource Ids of the network interfaces associated with the virtual
     machine.
    :vartype network_interfaces:
     list[~azure.mgmt.compute.v2022_03_01.models.NetworkInterfaceReference]
    :ivar network_api_version: Microsoft.Network API version used when creating networking
     resources in the Network Interface Configurations. Possible values include: "2020-11-01".
    :vartype network_api_version: str or ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
    :ivar network_interface_configurations: Networking configurations used to create the virtual
     machine networking resources.
    :vartype network_interface_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceConfiguration]
    """

    # msrest (de)serialization mapping.
    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterfaceReference]'},
        'network_api_version': {'key': 'networkApiVersion', 'type': 'str'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineNetworkInterfaceConfiguration]'},
    }

    def __init__(
        self,
        *,
        network_interfaces: Optional[List["NetworkInterfaceReference"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        network_interface_configurations: Optional[List["VirtualMachineNetworkInterfaceConfiguration"]] = None,
        **kwargs
    ):
        """
        :keyword network_interfaces: Resource Ids of the network interfaces associated with the
         virtual machine.
        :paramtype network_interfaces:
         list[~azure.mgmt.compute.v2022_03_01.models.NetworkInterfaceReference]
        :keyword network_api_version: Microsoft.Network API version used when creating networking
         resources in the Network Interface Configurations. Possible values include: "2020-11-01".
        :paramtype network_api_version: str or
         ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
        :keyword network_interface_configurations: Networking configurations used to create the
         virtual machine networking resources.
        :paramtype network_interface_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceConfiguration]
        """
        super().__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
        self.network_api_version = network_api_version
        self.network_interfaces = network_interfaces
class OrchestrationServiceStateInput(msrest.serialization.Model):
    """The input for OrchestrationServiceState.

    All required parameters must be populated in order to send to Azure.

    :ivar service_name: Required. Name of the service. Possible values include:
     "AutomaticRepairs", "DummyOrchestrationServiceName".
    :vartype service_name: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceNames
    :ivar action: Required. Action to be performed. Possible values include: "Resume", "Suspend".
    :vartype action: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceStateAction
    """

    # Both fields are mandatory on the wire; msrest enforces this at serialization time.
    _validation = {
        'service_name': {'required': True},
        'action': {'required': True},
    }

    # msrest (de)serialization mapping.
    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        service_name: Union[str, "OrchestrationServiceNames"],
        action: Union[str, "OrchestrationServiceStateAction"],
        **kwargs
    ):
        """
        :keyword service_name: Required. Name of the service. Possible values include:
         "AutomaticRepairs", "DummyOrchestrationServiceName".
        :paramtype service_name: str or
         ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceNames
        :keyword action: Required. Action to be performed. Possible values include: "Resume",
         "Suspend".
        :paramtype action: str or
         ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceStateAction
        """
        super().__init__(**kwargs)
        self.action = action
        self.service_name = service_name
class OrchestrationServiceSummary(msrest.serialization.Model):
    """Summary for an orchestration service of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar service_name: Name of the service. Possible values include: "AutomaticRepairs",
     "DummyOrchestrationServiceName".
    :vartype service_name: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceNames
    :ivar service_state: Current state of the service. Possible values include: "NotRunning",
     "Running", "Suspended".
    :vartype service_state: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceState
    """

    # Server-populated, read-only fields; never serialized in requests.
    _validation = {
        'service_name': {'readonly': True},
        'service_state': {'readonly': True},
    }

    # msrest (de)serialization mapping.
    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'service_state': {'key': 'serviceState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super().__init__(**kwargs)
        # Read-only attributes start as None and are filled in by deserialization.
        self.service_state = None
        self.service_name = None
class OSDisk(msrest.serialization.Model):
    """Specifies information about the operating system disk used by the virtual machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs for Azure virtual machines <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.

    All required parameters must be populated in order to send to Azure.

    :ivar os_type: Type of the OS included in the disk when creating a VM from a user-image or a
     specialized VHD. Possible values include: "Windows", "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    :ivar encryption_settings: Encryption settings for the OS Disk. Minimum api-version:
     2015-06-15.
    :vartype encryption_settings: ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings
    :ivar name: The disk name.
    :vartype name: str
    :ivar vhd: The virtual hard disk.
    :vartype vhd: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar image: Source user image virtual hard disk. It is copied before being attached to the
     virtual machine; the destination virtual hard drive must not exist.
    :vartype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar caching: Caching requirements. Defaults to **None** for Standard storage and
     **ReadOnly** for Premium storage. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Whether writeAccelerator should be enabled or disabled on the
     disk.
    :vartype write_accelerator_enabled: bool
    :ivar diff_disk_settings: Ephemeral Disk Settings for the operating system disk used by the
     virtual machine.
    :vartype diff_disk_settings: ~azure.mgmt.compute.v2022_03_01.models.DiffDiskSettings
    :ivar create_option: Required. How the virtual machine should be created. **Attach** \u2013
     create the VM from a specialized disk. **FromImage** \u2013 create the VM from an image
     (supply imageReference for a platform image, plus plan for a marketplace image). Possible
     values include: "FromImage", "Empty", "Attach".
    :vartype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
    :ivar disk_size_gb: Size of an empty data disk in gigabytes; can overwrite the size of the
     disk in a virtual machine image. Cannot be larger than 1023 GB.
    :vartype disk_size_gb: int
    :ivar managed_disk: The managed disk parameters.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
    :ivar delete_option: Whether the OS Disk should be deleted or detached upon VM deletion.
     **Delete** removes the OS disk with the VM; **Detach** retains it. Defaults to **Detach**
     (**Delete** for an ephemeral OS Disk, which cannot be changed). Possible values include:
     "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """

    # Only create_option is mandatory on the wire.
    _validation = {
        'create_option': {'required': True},
    }

    # msrest (de)serialization mapping.
    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': 'DiskEncryptionSettings'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        encryption_settings: Optional["DiskEncryptionSettings"] = None,
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """
        :keyword os_type: Type of the OS included in the disk when creating a VM from a user-image
         or a specialized VHD. Possible values include: "Windows", "Linux".
        :paramtype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        :keyword encryption_settings: Encryption settings for the OS Disk. Minimum api-version:
         2015-06-15.
        :paramtype encryption_settings: ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings
        :keyword name: The disk name.
        :paramtype name: str
        :keyword vhd: The virtual hard disk.
        :paramtype vhd: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword image: Source user image virtual hard disk. It is copied before being attached to
         the virtual machine; the destination virtual hard drive must not exist.
        :paramtype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword caching: Caching requirements. Defaults to **None** for Standard storage and
         **ReadOnly** for Premium storage. Possible values include: "None", "ReadOnly",
         "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword write_accelerator_enabled: Whether writeAccelerator should be enabled or disabled
         on the disk.
        :paramtype write_accelerator_enabled: bool
        :keyword diff_disk_settings: Ephemeral Disk Settings for the operating system disk used by
         the virtual machine.
        :paramtype diff_disk_settings: ~azure.mgmt.compute.v2022_03_01.models.DiffDiskSettings
        :keyword create_option: Required. How the virtual machine should be created. **Attach**
         \u2013 create the VM from a specialized disk. **FromImage** \u2013 create the VM from an
         image (supply imageReference for a platform image, plus plan for a marketplace image).
         Possible values include: "FromImage", "Empty", "Attach".
        :paramtype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
        :keyword disk_size_gb: Size of an empty data disk in gigabytes; can overwrite the size of
         the disk in a virtual machine image. Cannot be larger than 1023 GB.
        :paramtype disk_size_gb: int
        :keyword managed_disk: The managed disk parameters.
        :paramtype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
        :keyword delete_option: Whether the OS Disk should be deleted or detached upon VM deletion.
         **Delete** removes the OS disk with the VM; **Detach** retains it. Defaults to **Detach**
         (**Delete** for an ephemeral OS Disk, which cannot be changed). Possible values include:
         "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
        """
        super().__init__(**kwargs)
        # Plain attribute storage; each keyword maps 1:1 onto an instance attribute.
        self.os_type = os_type
        self.encryption_settings = encryption_settings
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.diff_disk_settings = diff_disk_settings
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.delete_option = delete_option
class OSDiskImage(msrest.serialization.Model):
    """Contains the os disk image information.

    All required parameters must be populated in order to send to Azure.

    :ivar operating_system: Required. The operating system of the osDiskImage. Possible values
     include: "Windows", "Linux".
    :vartype operating_system: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    """

    # The single field is mandatory on the wire.
    _validation = {
        'operating_system': {'required': True},
    }

    # msrest (de)serialization mapping.
    _attribute_map = {
        'operating_system': {'key': 'operatingSystem', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        operating_system: Union[str, "OperatingSystemTypes"],
        **kwargs
    ):
        """
        :keyword operating_system: Required. The operating system of the osDiskImage. Possible
         values include: "Windows", "Linux".
        :paramtype operating_system: str or
         ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        """
        super().__init__(**kwargs)
        self.operating_system = operating_system
class OSProfile(msrest.serialization.Model):
    """Specifies the operating system settings for the virtual machine. Some of the settings cannot be changed once VM is provisioned.

    :ivar computer_name: Host OS name of the virtual machine; cannot be updated after the VM is
     created. Max-length: 15 characters (Windows), 64 characters (Linux). For naming conventions
     and restrictions see `Azure infrastructure services implementation guidelines
     <https://docs.microsoft.com/azure/azure-resource-manager/management/resource-name-rules>`_.
    :vartype computer_name: str
    :ivar admin_username: Name of the administrator account; cannot be updated after the VM is
     created. **Windows-only restriction:** Cannot end in "." **Disallowed values:**
     "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1",
     "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest",
     "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2",
     "test3", "user4", "user5". Length: 1-64 characters (Linux), up to 20 characters (Windows).
    :vartype admin_username: str
    :ivar admin_password: Password of the administrator account. Length: 8-123 characters
     (Windows), 6-72 characters (Linux). **Complexity requirements:** 3 out of 4 conditions need
     to be fulfilled: has lower characters, has upper characters, has a digit, has a special
     character (Regex match [\W_]). **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd",
     "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
     "iloveyou!" For resetting the password, see `How to reset the Remote Desktop service or its
     login password in a Windows VM
     <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp>`_ For resetting
     root password, see `Manage users, SSH, and check or repair disks on Azure Linux VMs using
     the VMAccess Extension
     <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection>`_.
    :vartype admin_password: str
    :ivar custom_data: Base-64 encoded string of custom data, decoded to a binary array that is
     saved as a file on the Virtual Machine; maximum length of the binary array is 65535 bytes.
     **Note: Do not pass any secrets or passwords in customData property.** Cannot be updated
     after the VM is created. See `Custom Data on Azure VMs
     <https://azure.microsoft.com/blog/custom-data-and-cloud-init-on-windows-azure/>`_ and `Using
     cloud-init to customize a Linux VM during creation
     <https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init>`_.
    :vartype custom_data: str
    :ivar windows_configuration: Windows operating system settings on the virtual machine.
    :vartype windows_configuration: ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
    :ivar linux_configuration: Linux operating system settings on the virtual machine. For a list
     of supported Linux distributions, see `Linux on Azure-Endorsed Distributions
     <https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros>`_.
    :vartype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
    :ivar secrets: Set of certificates to install onto the virtual machine. To install
     certificates it is recommended to use the `Azure Key Vault virtual machine extension for
     Linux <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux>`_ or
     the `Azure Key Vault virtual machine extension for Windows
     <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows>`_.
    :vartype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
    :ivar allow_extension_operations: Whether extension operations are allowed on the virtual
     machine. May only be set to False when no extensions are present on the virtual machine.
    :vartype allow_extension_operations: bool
    :ivar require_guest_provision_signal: Optional property which must either be set to True or
     omitted.
    :vartype require_guest_provision_signal: bool
    """

    # msrest (de)serialization mapping.
    _attribute_map = {
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
        'allow_extension_operations': {'key': 'allowExtensionOperations', 'type': 'bool'},
        'require_guest_provision_signal': {'key': 'requireGuestProvisionSignal', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        computer_name: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        require_guest_provision_signal: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword computer_name: Host OS name of the virtual machine; cannot be updated after the
         VM is created. Max-length: 15 characters (Windows), 64 characters (Linux). For naming
         conventions and restrictions see `Azure infrastructure services implementation guidelines
         <https://docs.microsoft.com/azure/azure-resource-manager/management/resource-name-rules>`_.
        :paramtype computer_name: str
        :keyword admin_username: Name of the administrator account; cannot be updated after the VM
         is created. **Windows-only restriction:** Cannot end in "." **Disallowed values:**
         "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1",
         "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david",
         "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys",
         "test2", "test3", "user4", "user5". Length: 1-64 characters (Linux), up to 20 characters
         (Windows).
        :paramtype admin_username: str
        :keyword admin_password: Password of the administrator account. Length: 8-123 characters
         (Windows), 6-72 characters (Linux). **Complexity requirements:** 3 out of 4 conditions
         need to be fulfilled: has lower characters, has upper characters, has a digit, has a
         special character (Regex match [\W_]). **Disallowed values:** "abc@123", "P@$$w0rd",
         "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1",
         "Password22", "iloveyou!" For resetting the password, see `How to reset the Remote
         Desktop service or its login password in a Windows VM
         <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp>`_ For
         resetting root password, see `Manage users, SSH, and check or repair disks on Azure Linux
         VMs using the VMAccess Extension
         <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection>`_.
        :paramtype admin_password: str
        :keyword custom_data: Base-64 encoded string of custom data, decoded to a binary array
         that is saved as a file on the Virtual Machine; maximum length of the binary array is
         65535 bytes. **Note: Do not pass any secrets or passwords in customData property.**
         Cannot be updated after the VM is created. See `Custom Data on Azure VMs
         <https://azure.microsoft.com/blog/custom-data-and-cloud-init-on-windows-azure/>`_ and
         `Using cloud-init to customize a Linux VM during creation
         <https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init>`_.
        :paramtype custom_data: str
        :keyword windows_configuration: Windows operating system settings on the virtual machine.
        :paramtype windows_configuration: ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
        :keyword linux_configuration: Linux operating system settings on the virtual machine. For
         a list of supported Linux distributions, see `Linux on Azure-Endorsed Distributions
         <https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros>`_.
        :paramtype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
        :keyword secrets: Set of certificates to install onto the virtual machine. To install
         certificates it is recommended to use the `Azure Key Vault virtual machine extension for
         Linux <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux>`_ or
         the `Azure Key Vault virtual machine extension for Windows
         <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows>`_.
        :paramtype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
        :keyword allow_extension_operations: Whether extension operations are allowed on the
         virtual machine. May only be set to False when no extensions are present on the virtual
         machine.
        :paramtype allow_extension_operations: bool
        :keyword require_guest_provision_signal: Optional property which must either be set to
         True or omitted.
        :paramtype require_guest_provision_signal: bool
        """
        super().__init__(**kwargs)
        # Plain attribute storage; each keyword maps 1:1 onto an instance attribute.
        self.computer_name = computer_name
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
        self.allow_extension_operations = allow_extension_operations
        self.require_guest_provision_signal = require_guest_provision_signal
class PatchInstallationDetail(msrest.serialization.Model):
    """Details of one patch that was encountered while an installation action ran.

    Every property is populated by the service; values supplied by the client
    are ignored when a request is sent.

    :ivar patch_id: A unique identifier for the patch.
    :vartype patch_id: str
    :ivar name: The friendly name of the patch.
    :vartype name: str
    :ivar version: The package version string (may follow Semantic Versioning).
     Only applies to Linux.
    :vartype version: str
    :ivar kb_id: The KBID of the patch. Only applies to Windows patches.
    :vartype kb_id: str
    :ivar classifications: The classification(s) of the patch as provided by the
     patch publisher.
    :vartype classifications: list[str]
    :ivar installation_state: The state of the patch after the installation
     operation completed. Possible values include: "Unknown", "Installed",
     "Failed", "Excluded", "NotSelected", "Pending".
    :vartype installation_state: str or
     ~azure.mgmt.compute.v2022_03_01.models.PatchInstallationState
    """

    _validation = {
        "patch_id": {"readonly": True},
        "name": {"readonly": True},
        "version": {"readonly": True},
        "kb_id": {"readonly": True},
        "classifications": {"readonly": True},
        "installation_state": {"readonly": True},
    }

    _attribute_map = {
        "patch_id": {"key": "patchId", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "version": {"key": "version", "type": "str"},
        "kb_id": {"key": "kbId", "type": "str"},
        "classifications": {"key": "classifications", "type": "[str]"},
        "installation_state": {"key": "installationState", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        """
        super(PatchInstallationDetail, self).__init__(**kwargs)
        # All fields are server-populated (read-only); start them out empty.
        self.patch_id = None
        self.name = None
        self.version = None
        self.kb_id = None
        self.classifications = None
        self.installation_state = None
class PatchSettings(msrest.serialization.Model):
    """Settings that control VM Guest Patching on Windows.

    :ivar patch_mode: The VM Guest Patching mode for an IaaS virtual machine, or
     for virtual machines belonging to a scale set with OrchestrationMode set to
     Flexible.:code:`<br />`:code:`<br />` Possible values are::code:`<br
     />`:code:`<br />` **Manual** - Patch application is controlled by you, by
     applying patches manually inside the VM; automatic updates are disabled and
     WindowsConfiguration.enableAutomaticUpdates must be false.:code:`<br
     />`:code:`<br />` **AutomaticByOS** - The OS updates the virtual machine
     automatically; WindowsConfiguration.enableAutomaticUpdates must be
     true.:code:`<br />`:code:`<br />` **AutomaticByPlatform** - The platform
     updates the virtual machine automatically; provisionVMAgent and
     WindowsConfiguration.enableAutomaticUpdates must both be true. Possible
     values include: "Manual", "AutomaticByOS", "AutomaticByPlatform".
    :vartype patch_mode: str or ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchMode
    :ivar enable_hotpatching: Lets customers patch their Azure VMs without a
     reboot. Requires 'provisionVMAgent' to be true and 'patchMode' to be
     'AutomaticByPlatform'.
    :vartype enable_hotpatching: bool
    :ivar assessment_mode: The VM Guest patch assessment mode for the IaaS
     virtual machine.:code:`<br />`:code:`<br />` Possible values are::code:`<br
     />`:code:`<br />` **ImageDefault** - Patch assessment timing is controlled
     by you.:code:`<br />`:code:`<br />` **AutomaticByPlatform** - The platform
     triggers periodic patch assessments; provisionVMAgent must be true.
     Possible values include: "ImageDefault", "AutomaticByPlatform".
    :vartype assessment_mode: str or
     ~azure.mgmt.compute.v2022_03_01.models.WindowsPatchAssessmentMode
    :ivar automatic_by_platform_settings: Additional settings used when the
     patch mode is AutomaticByPlatform in VM Guest Patching on Windows.
    :vartype automatic_by_platform_settings:
     ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchAutomaticByPlatformSettings
    """

    _attribute_map = {
        "patch_mode": {"key": "patchMode", "type": "str"},
        "enable_hotpatching": {"key": "enableHotpatching", "type": "bool"},
        "assessment_mode": {"key": "assessmentMode", "type": "str"},
        "automatic_by_platform_settings": {"key": "automaticByPlatformSettings", "type": "WindowsVMGuestPatchAutomaticByPlatformSettings"},
    }

    def __init__(
        self,
        *,
        patch_mode: Optional[Union[str, "WindowsVMGuestPatchMode"]] = None,
        enable_hotpatching: Optional[bool] = None,
        assessment_mode: Optional[Union[str, "WindowsPatchAssessmentMode"]] = None,
        automatic_by_platform_settings: Optional["WindowsVMGuestPatchAutomaticByPlatformSettings"] = None,
        **kwargs
    ):
        """
        :keyword patch_mode: The VM Guest Patching mode for an IaaS virtual machine, or for virtual
         machines belonging to a scale set with OrchestrationMode set to Flexible.:code:`<br
         />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - Patch
         application is controlled by you, by applying patches manually inside the VM; automatic
         updates are disabled and WindowsConfiguration.enableAutomaticUpdates must be false.:code:`<br
         />`:code:`<br />` **AutomaticByOS** - The OS updates the virtual machine automatically;
         WindowsConfiguration.enableAutomaticUpdates must be true.:code:`<br />`:code:`<br />`
         **AutomaticByPlatform** - The platform updates the virtual machine automatically;
         provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must both be true. Possible
         values include: "Manual", "AutomaticByOS", "AutomaticByPlatform".
        :paramtype patch_mode: str or ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchMode
        :keyword enable_hotpatching: Lets customers patch their Azure VMs without a reboot. Requires
         'provisionVMAgent' to be true and 'patchMode' to be 'AutomaticByPlatform'.
        :paramtype enable_hotpatching: bool
        :keyword assessment_mode: The VM Guest patch assessment mode for the IaaS virtual
         machine.:code:`<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />`
         **ImageDefault** - Patch assessment timing is controlled by you.:code:`<br />`:code:`<br />`
         **AutomaticByPlatform** - The platform triggers periodic patch assessments; provisionVMAgent
         must be true. Possible values include: "ImageDefault", "AutomaticByPlatform".
        :paramtype assessment_mode: str or
         ~azure.mgmt.compute.v2022_03_01.models.WindowsPatchAssessmentMode
        :keyword automatic_by_platform_settings: Additional settings used when the patch mode is
         AutomaticByPlatform in VM Guest Patching on Windows.
        :paramtype automatic_by_platform_settings:
         ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchAutomaticByPlatformSettings
        """
        super(PatchSettings, self).__init__(**kwargs)
        self.patch_mode = patch_mode
        self.enable_hotpatching = enable_hotpatching
        self.assessment_mode = assessment_mode
        self.automatic_by_platform_settings = automatic_by_platform_settings
class Plan(msrest.serialization.Model):
    """Describes the marketplace image used to create the virtual machine.

    This element is only used for marketplace images. Before a marketplace image
    can be used from an API, the image must be enabled for programmatic use: in
    the Azure portal, locate the marketplace image you want to use, click **Want
    to deploy programmatically, Get Started ->**, enter any required information,
    then click **Save**.

    :ivar name: The plan ID.
    :vartype name: str
    :ivar publisher: The publisher ID.
    :vartype publisher: str
    :ivar product: The product of the image from the marketplace; this is the
     same value as Offer under the imageReference element.
    :vartype product: str
    :ivar promotion_code: The promotion code.
    :vartype promotion_code: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "publisher": {"key": "publisher", "type": "str"},
        "product": {"key": "product", "type": "str"},
        "promotion_code": {"key": "promotionCode", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        publisher: Optional[str] = None,
        product: Optional[str] = None,
        promotion_code: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: The plan ID.
        :paramtype name: str
        :keyword publisher: The publisher ID.
        :paramtype publisher: str
        :keyword product: The product of the image from the marketplace; this is the same value as
         Offer under the imageReference element.
        :paramtype product: str
        :keyword promotion_code: The promotion code.
        :paramtype promotion_code: str
        """
        super(Plan, self).__init__(**kwargs)
        self.name = name
        self.publisher = publisher
        self.product = product
        self.promotion_code = promotion_code
class ProximityPlacementGroup(Resource):
    """Describes a proximity placement group.

    Server-populated variables are ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar zones: The Availability Zone in which a virtual machine, virtual
     machine scale set or availability set associated with this proximity
     placement group can be created.
    :vartype zones: list[str]
    :ivar proximity_placement_group_type: The type of the proximity placement
     group. :code:`<br>`:code:`<br>` Possible values are:
     :code:`<br>`:code:`<br>` **Standard** : Co-locate resources within an Azure
     region or Availability Zone. :code:`<br>`:code:`<br>` **Ultra** : For
     future use. Possible values include: "Standard", "Ultra".
    :vartype proximity_placement_group_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroupType
    :ivar virtual_machines: References to all virtual machines in the proximity
     placement group.
    :vartype virtual_machines:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceWithColocationStatus]
    :ivar virtual_machine_scale_sets: References to all virtual machine scale
     sets in the proximity placement group.
    :vartype virtual_machine_scale_sets:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceWithColocationStatus]
    :ivar availability_sets: References to all availability sets in the
     proximity placement group.
    :vartype availability_sets:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceWithColocationStatus]
    :ivar colocation_status: The colocation status of the Proximity Placement
     Group.
    :vartype colocation_status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    :ivar intent: The user intent of the proximity placement group.
    :vartype intent: ~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroupPropertiesIntent
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "virtual_machines": {"readonly": True},
        "virtual_machine_scale_sets": {"readonly": True},
        "availability_sets": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "zones": {"key": "zones", "type": "[str]"},
        "proximity_placement_group_type": {"key": "properties.proximityPlacementGroupType", "type": "str"},
        "virtual_machines": {"key": "properties.virtualMachines", "type": "[SubResourceWithColocationStatus]"},
        "virtual_machine_scale_sets": {"key": "properties.virtualMachineScaleSets", "type": "[SubResourceWithColocationStatus]"},
        "availability_sets": {"key": "properties.availabilitySets", "type": "[SubResourceWithColocationStatus]"},
        "colocation_status": {"key": "properties.colocationStatus", "type": "InstanceViewStatus"},
        "intent": {"key": "properties.intent", "type": "ProximityPlacementGroupPropertiesIntent"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        proximity_placement_group_type: Optional[Union[str, "ProximityPlacementGroupType"]] = None,
        colocation_status: Optional["InstanceViewStatus"] = None,
        intent: Optional["ProximityPlacementGroupPropertiesIntent"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword zones: The Availability Zone in which a virtual machine, virtual machine scale set or
         availability set associated with this proximity placement group can be created.
        :paramtype zones: list[str]
        :keyword proximity_placement_group_type: The type of the proximity placement group.
         :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **Standard** :
         Co-locate resources within an Azure region or Availability Zone. :code:`<br>`:code:`<br>`
         **Ultra** : For future use. Possible values include: "Standard", "Ultra".
        :paramtype proximity_placement_group_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroupType
        :keyword colocation_status: The colocation status of the Proximity Placement Group.
        :paramtype colocation_status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
        :keyword intent: The user intent of the proximity placement group.
        :paramtype intent:
         ~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroupPropertiesIntent
        """
        super(ProximityPlacementGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        self.proximity_placement_group_type = proximity_placement_group_type
        self.colocation_status = colocation_status
        self.intent = intent
        # The following are server-populated (read-only) collections.
        self.virtual_machines = None
        self.virtual_machine_scale_sets = None
        self.availability_sets = None
class ProximityPlacementGroupListResult(msrest.serialization.Model):
    """Response of the List Proximity Placement Group operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The proximity placement groups in this page.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroup]
    :ivar next_link: URI from which the next page of proximity placement groups
     can be fetched.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[ProximityPlacementGroup]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: List["ProximityPlacementGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: Required. The proximity placement groups in this page.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.ProximityPlacementGroup]
        :keyword next_link: URI from which the next page of proximity placement groups can be fetched.
        :paramtype next_link: str
        """
        super(ProximityPlacementGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ProximityPlacementGroupPropertiesIntent(msrest.serialization.Model):
    """The user intent of the proximity placement group.

    :ivar vm_sizes: Possible sizes of virtual machines that can be created in
     the proximity placement group.
    :vartype vm_sizes: list[str]
    """

    _attribute_map = {
        "vm_sizes": {"key": "vmSizes", "type": "[str]"},
    }

    def __init__(self, *, vm_sizes: Optional[List[str]] = None, **kwargs):
        """
        :keyword vm_sizes: Possible sizes of virtual machines that can be created in the proximity
         placement group.
        :paramtype vm_sizes: list[str]
        """
        super(ProximityPlacementGroupPropertiesIntent, self).__init__(**kwargs)
        self.vm_sizes = vm_sizes
class ProximityPlacementGroupUpdate(UpdateResource):
    """Update parameters for a proximity placement group.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(ProximityPlacementGroupUpdate, self).__init__(tags=tags, **kwargs)
class ProxyResource(msrest.serialization.Model):
    """The Azure Resource Manager proxy resource model; it carries no tags and
    no location.

    All variables are populated by the server and are ignored when sending a
    request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        """
        super(ProxyResource, self).__init__(**kwargs)
        # Server-populated identifiers; never sent by the client.
        self.id = None
        self.name = None
        self.type = None
class PublicIPAddressSku(msrest.serialization.Model):
    """The public IP Sku. May only be set when OrchestrationMode is Flexible.

    :ivar name: The public IP sku name. Possible values include: "Basic",
     "Standard".
    :vartype name: str or ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSkuName
    :ivar tier: The public IP sku tier. Possible values include: "Regional",
     "Global".
    :vartype tier: str or ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSkuTier
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[Union[str, "PublicIPAddressSkuName"]] = None,
        tier: Optional[Union[str, "PublicIPAddressSkuTier"]] = None,
        **kwargs
    ):
        """
        :keyword name: The public IP sku name. Possible values include: "Basic", "Standard".
        :paramtype name: str or ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSkuName
        :keyword tier: The public IP sku tier. Possible values include: "Regional", "Global".
        :paramtype tier: str or ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSkuTier
        """
        super(PublicIPAddressSku, self).__init__(**kwargs)
        self.name = name
        self.tier = tier
class PurchasePlan(msrest.serialization.Model):
    """Establishes the purchase context of any 3rd Party artifact through
    MarketPlace.

    All required parameters must be populated in order to send to Azure.

    :ivar publisher: Required. The publisher ID.
    :vartype publisher: str
    :ivar name: Required. The plan ID.
    :vartype name: str
    :ivar product: Required. The product of the image from the marketplace;
     this is the same value as Offer under the imageReference element.
    :vartype product: str
    """

    _validation = {
        "publisher": {"required": True},
        "name": {"required": True},
        "product": {"required": True},
    }

    _attribute_map = {
        "publisher": {"key": "publisher", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "product": {"key": "product", "type": "str"},
    }

    def __init__(self, *, publisher: str, name: str, product: str, **kwargs):
        """
        :keyword publisher: Required. The publisher ID.
        :paramtype publisher: str
        :keyword name: Required. The plan ID.
        :paramtype name: str
        :keyword product: Required. The product of the image from the marketplace; this is the same
         value as Offer under the imageReference element.
        :paramtype product: str
        """
        super(PurchasePlan, self).__init__(**kwargs)
        self.publisher = publisher
        self.name = name
        self.product = product
class RecoveryWalkResponse(msrest.serialization.Model):
    """Response returned after calling a manual recovery walk.

    All variables are populated by the server and are ignored when sending a
    request.

    :ivar walk_performed: Whether the recovery walk was performed.
    :vartype walk_performed: bool
    :ivar next_platform_update_domain: The next update domain that needs to be
     walked; null means a walk spanning all update domains has been completed.
    :vartype next_platform_update_domain: int
    """

    _validation = {
        "walk_performed": {"readonly": True},
        "next_platform_update_domain": {"readonly": True},
    }

    _attribute_map = {
        "walk_performed": {"key": "walkPerformed", "type": "bool"},
        "next_platform_update_domain": {"key": "nextPlatformUpdateDomain", "type": "int"},
    }

    def __init__(self, **kwargs):
        """
        """
        super(RecoveryWalkResponse, self).__init__(**kwargs)
        # Both fields are server-populated.
        self.walk_performed = None
        self.next_platform_update_domain = None
class RequestRateByIntervalInput(LogAnalyticsInputBase):
    """Request input for the LogAnalytics getRequestRateByInterval Api.

    All required parameters must be populated in order to send to Azure.

    :ivar blob_container_sas_uri: Required. SAS Uri of the logging blob
     container the LogAnalytics Api writes its output logs to.
    :vartype blob_container_sas_uri: str
    :ivar from_time: Required. From time of the query.
    :vartype from_time: ~datetime.datetime
    :ivar to_time: Required. To time of the query.
    :vartype to_time: ~datetime.datetime
    :ivar group_by_throttle_policy: Group query result by Throttle Policy applied.
    :vartype group_by_throttle_policy: bool
    :ivar group_by_operation_name: Group query result by Operation Name.
    :vartype group_by_operation_name: bool
    :ivar group_by_resource_name: Group query result by Resource Name.
    :vartype group_by_resource_name: bool
    :ivar group_by_client_application_id: Group query result by Client Application ID.
    :vartype group_by_client_application_id: bool
    :ivar group_by_user_agent: Group query result by User Agent.
    :vartype group_by_user_agent: bool
    :ivar interval_length: Required. Interval, in minutes, used to create the
     LogAnalytics call rate logs. Possible values include: "ThreeMins",
     "FiveMins", "ThirtyMins", "SixtyMins".
    :vartype interval_length: str or ~azure.mgmt.compute.v2022_03_01.models.IntervalInMins
    """

    _validation = {
        "blob_container_sas_uri": {"required": True},
        "from_time": {"required": True},
        "to_time": {"required": True},
        "interval_length": {"required": True},
    }

    _attribute_map = {
        "blob_container_sas_uri": {"key": "blobContainerSasUri", "type": "str"},
        "from_time": {"key": "fromTime", "type": "iso-8601"},
        "to_time": {"key": "toTime", "type": "iso-8601"},
        "group_by_throttle_policy": {"key": "groupByThrottlePolicy", "type": "bool"},
        "group_by_operation_name": {"key": "groupByOperationName", "type": "bool"},
        "group_by_resource_name": {"key": "groupByResourceName", "type": "bool"},
        "group_by_client_application_id": {"key": "groupByClientApplicationId", "type": "bool"},
        "group_by_user_agent": {"key": "groupByUserAgent", "type": "bool"},
        "interval_length": {"key": "intervalLength", "type": "str"},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        interval_length: Union[str, "IntervalInMins"],
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword blob_container_sas_uri: Required. SAS Uri of the logging blob container the
         LogAnalytics Api writes its output logs to.
        :paramtype blob_container_sas_uri: str
        :keyword from_time: Required. From time of the query.
        :paramtype from_time: ~datetime.datetime
        :keyword to_time: Required. To time of the query.
        :paramtype to_time: ~datetime.datetime
        :keyword group_by_throttle_policy: Group query result by Throttle Policy applied.
        :paramtype group_by_throttle_policy: bool
        :keyword group_by_operation_name: Group query result by Operation Name.
        :paramtype group_by_operation_name: bool
        :keyword group_by_resource_name: Group query result by Resource Name.
        :paramtype group_by_resource_name: bool
        :keyword group_by_client_application_id: Group query result by Client Application ID.
        :paramtype group_by_client_application_id: bool
        :keyword group_by_user_agent: Group query result by User Agent.
        :paramtype group_by_user_agent: bool
        :keyword interval_length: Required. Interval, in minutes, used to create the LogAnalytics
         call rate logs. Possible values include: "ThreeMins", "FiveMins", "ThirtyMins", "SixtyMins".
        :paramtype interval_length: str or ~azure.mgmt.compute.v2022_03_01.models.IntervalInMins
        """
        # Everything except interval_length is handled by the shared base class.
        super(RequestRateByIntervalInput, self).__init__(
            blob_container_sas_uri=blob_container_sas_uri,
            from_time=from_time,
            to_time=to_time,
            group_by_throttle_policy=group_by_throttle_policy,
            group_by_operation_name=group_by_operation_name,
            group_by_resource_name=group_by_resource_name,
            group_by_client_application_id=group_by_client_application_id,
            group_by_user_agent=group_by_user_agent,
            **kwargs
        )
        self.interval_length = interval_length
class ResourceWithOptionalLocation(msrest.serialization.Model):
    """Resource model definition whose location property is optional.

    Server-populated variables are ignored when sending a request.

    :ivar location: Resource location.
    :vartype location: str
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "location": {"key": "location", "type": "str"},
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(ResourceWithOptionalLocation, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
        # Read-only identifiers populated by the server.
        self.id = None
        self.name = None
        self.type = None
class RestorePoint(ProxyResource):
    """Details of a restore point.

    Server-populated variables are ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar exclude_disks: Disk resource ids that the customer wishes to exclude
     from the restore point; if none are specified, all disks are included.
    :vartype exclude_disks: list[~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference]
    :ivar source_metadata: Details of the VM captured at the time the restore
     point was created.
    :vartype source_metadata: ~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceMetadata
    :ivar provisioning_state: The provisioning state of the restore point.
    :vartype provisioning_state: str
    :ivar consistency_mode: ConsistencyMode of the RestorePoint. Can be
     specified in the input when creating a restore point; for now only
     CrashConsistent is accepted as a valid input. See
     https://aka.ms/RestorePoints for details. Possible values include:
     "CrashConsistent", "FileSystemConsistent", "ApplicationConsistent".
    :vartype consistency_mode: str or ~azure.mgmt.compute.v2022_03_01.models.ConsistencyModeTypes
    :ivar time_created: The creation time of the restore point.
    :vartype time_created: ~datetime.datetime
    :ivar source_restore_point: Resource Id of the source restore point from
     which a copy needs to be created.
    :vartype source_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    :ivar instance_view: The restore point instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.RestorePointInstanceView
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "source_metadata": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "instance_view": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "exclude_disks": {"key": "properties.excludeDisks", "type": "[ApiEntityReference]"},
        "source_metadata": {"key": "properties.sourceMetadata", "type": "RestorePointSourceMetadata"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "consistency_mode": {"key": "properties.consistencyMode", "type": "str"},
        "time_created": {"key": "properties.timeCreated", "type": "iso-8601"},
        "source_restore_point": {"key": "properties.sourceRestorePoint", "type": "ApiEntityReference"},
        "instance_view": {"key": "properties.instanceView", "type": "RestorePointInstanceView"},
    }

    def __init__(
        self,
        *,
        exclude_disks: Optional[List["ApiEntityReference"]] = None,
        consistency_mode: Optional[Union[str, "ConsistencyModeTypes"]] = None,
        time_created: Optional[datetime.datetime] = None,
        source_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        """
        :keyword exclude_disks: Disk resource ids that the customer wishes to exclude from the restore
         point; if none are specified, all disks are included.
        :paramtype exclude_disks: list[~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference]
        :keyword consistency_mode: ConsistencyMode of the RestorePoint. Can be specified in the input
         when creating a restore point; for now only CrashConsistent is accepted as a valid input. See
         https://aka.ms/RestorePoints for details. Possible values include: "CrashConsistent",
         "FileSystemConsistent", "ApplicationConsistent".
        :paramtype consistency_mode: str or ~azure.mgmt.compute.v2022_03_01.models.ConsistencyModeTypes
        :keyword time_created: The creation time of the restore point.
        :paramtype time_created: ~datetime.datetime
        :keyword source_restore_point: Resource Id of the source restore point from which a copy needs
         to be created.
        :paramtype source_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        """
        super(RestorePoint, self).__init__(**kwargs)
        self.exclude_disks = exclude_disks
        self.consistency_mode = consistency_mode
        self.time_created = time_created
        self.source_restore_point = source_restore_point
        # Read-only properties populated by the server.
        self.source_metadata = None
        self.provisioning_state = None
        self.instance_view = None
class RestorePointCollection(Resource):
    """Parameters for creating or updating a Restore Point collection.

    Server-populated variables are ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar source: Properties of the source resource that this restore point
     collection is created from.
    :vartype source: ~azure.mgmt.compute.v2022_03_01.models.RestorePointCollectionSourceProperties
    :ivar provisioning_state: The provisioning state of the restore point
     collection.
    :vartype provisioning_state: str
    :ivar restore_point_collection_id: The unique id of the restore point
     collection.
    :vartype restore_point_collection_id: str
    :ivar restore_points: All restore points created under this restore point
     collection.
    :vartype restore_points: list[~azure.mgmt.compute.v2022_03_01.models.RestorePoint]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
        "restore_point_collection_id": {"readonly": True},
        "restore_points": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "source": {"key": "properties.source", "type": "RestorePointCollectionSourceProperties"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "restore_point_collection_id": {"key": "properties.restorePointCollectionId", "type": "str"},
        "restore_points": {"key": "properties.restorePoints", "type": "[RestorePoint]"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["RestorePointCollectionSourceProperties"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword source: Properties of the source resource that this restore point collection is
         created from.
        :paramtype source:
         ~azure.mgmt.compute.v2022_03_01.models.RestorePointCollectionSourceProperties
        """
        super(RestorePointCollection, self).__init__(location=location, tags=tags, **kwargs)
        self.source = source
        # Read-only properties populated by the server.
        self.provisioning_state = None
        self.restore_point_collection_id = None
        self.restore_points = None
class RestorePointCollectionListResult(msrest.serialization.Model):
    """Response of the list restore point collection operation.

    :ivar value: The list of restore point collections.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.RestorePointCollection]
    :ivar next_link: The uri used to fetch the next page of RestorePointCollections; pass it to
     ListNext() to retrieve that page.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[RestorePointCollection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["RestorePointCollection"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: The list of restore point collections.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.RestorePointCollection]
        :keyword next_link: The uri used to fetch the next page of RestorePointCollections; pass it
         to ListNext() to retrieve that page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class RestorePointCollectionSourceProperties(msrest.serialization.Model):
    """Properties of the source resource from which a restore point collection is created.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar location: Location of the source resource used to create this restore point collection.
    :vartype location: str
    :ivar id: Resource Id of the source resource used to create this restore point collection.
    :vartype id: str
    """

    _validation = {
        "location": {"readonly": True},
    }

    _attribute_map = {
        "location": {"key": "location", "type": "str"},
        "id": {"key": "id", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # noqa: A002 -- name fixed by the service contract
        **kwargs
    ):
        """
        :keyword id: Resource Id of the source resource used to create this restore point
         collection.
        :paramtype id: str
        """
        super().__init__(**kwargs)
        # ``location`` is read-only and filled in by the service.
        self.location = None
        self.id = id
class RestorePointCollectionUpdate(UpdateResource):
    """Parameters used to update a restore point collection.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar source: Properties of the source resource from which this restore point collection was
     created.
    :vartype source: ~azure.mgmt.compute.v2022_03_01.models.RestorePointCollectionSourceProperties
    :ivar provisioning_state: Provisioning state of the restore point collection.
    :vartype provisioning_state: str
    :ivar restore_point_collection_id: Unique id of the restore point collection.
    :vartype restore_point_collection_id: str
    :ivar restore_points: All restore points created under this restore point collection.
    :vartype restore_points: list[~azure.mgmt.compute.v2022_03_01.models.RestorePoint]
    """

    _validation = {
        "provisioning_state": {"readonly": True},
        "restore_point_collection_id": {"readonly": True},
        "restore_points": {"readonly": True},
    }

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "source": {"key": "properties.source", "type": "RestorePointCollectionSourceProperties"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "restore_point_collection_id": {"key": "properties.restorePointCollectionId", "type": "str"},
        "restore_points": {"key": "properties.restorePoints", "type": "[RestorePoint]"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["RestorePointCollectionSourceProperties"] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword source: Properties of the source resource from which this restore point collection
         was created.
        :paramtype source:
         ~azure.mgmt.compute.v2022_03_01.models.RestorePointCollectionSourceProperties
        """
        super().__init__(tags=tags, **kwargs)
        self.source = source
        # Read-only properties are populated by the service; initialize to None.
        self.provisioning_state = None
        self.restore_point_collection_id = None
        self.restore_points = None
class RestorePointInstanceView(msrest.serialization.Model):
    """Instance view of a restore point.

    :ivar disk_restore_points: Information about the disk restore points.
    :vartype disk_restore_points:
     list[~azure.mgmt.compute.v2022_03_01.models.DiskRestorePointInstanceView]
    :ivar statuses: Resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        "disk_restore_points": {"key": "diskRestorePoints", "type": "[DiskRestorePointInstanceView]"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
    }

    def __init__(
        self,
        *,
        disk_restore_points: Optional[List["DiskRestorePointInstanceView"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword disk_restore_points: Information about the disk restore points.
        :paramtype disk_restore_points:
         list[~azure.mgmt.compute.v2022_03_01.models.DiskRestorePointInstanceView]
        :keyword statuses: Resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        self.statuses = statuses
        self.disk_restore_points = disk_restore_points
class RestorePointSourceMetadata(msrest.serialization.Model):
    """Properties of the Virtual Machine for which the restore point was created. These are a
    subset snapshot of the overall Virtual Machine properties captured when the restore point
    was created.

    :ivar hardware_profile: The hardware profile.
    :vartype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
    :ivar storage_profile: The storage profile.
    :vartype storage_profile:
     ~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMStorageProfile
    :ivar os_profile: The OS profile.
    :vartype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
    :ivar diagnostics_profile: The diagnostics profile.
    :vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
    :ivar license_type: The license type, for bring-your-own-license scenarios.
    :vartype license_type: str
    :ivar vm_id: The virtual machine unique id.
    :vartype vm_id: str
    :ivar security_profile: The security profile.
    :vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
    :ivar location: Location of the VM from which the restore point was created.
    :vartype location: str
    """

    _attribute_map = {
        "hardware_profile": {"key": "hardwareProfile", "type": "HardwareProfile"},
        "storage_profile": {"key": "storageProfile", "type": "RestorePointSourceVMStorageProfile"},
        "os_profile": {"key": "osProfile", "type": "OSProfile"},
        "diagnostics_profile": {"key": "diagnosticsProfile", "type": "DiagnosticsProfile"},
        "license_type": {"key": "licenseType", "type": "str"},
        "vm_id": {"key": "vmId", "type": "str"},
        "security_profile": {"key": "securityProfile", "type": "SecurityProfile"},
        "location": {"key": "location", "type": "str"},
    }

    def __init__(
        self,
        *,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["RestorePointSourceVMStorageProfile"] = None,
        os_profile: Optional["OSProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        license_type: Optional[str] = None,
        vm_id: Optional[str] = None,
        security_profile: Optional["SecurityProfile"] = None,
        location: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword hardware_profile: The hardware profile.
        :paramtype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
        :keyword storage_profile: The storage profile.
        :paramtype storage_profile:
         ~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMStorageProfile
        :keyword os_profile: The OS profile.
        :paramtype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
        :keyword diagnostics_profile: The diagnostics profile.
        :paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
        :keyword license_type: The license type, for bring-your-own-license scenarios.
        :paramtype license_type: str
        :keyword vm_id: The virtual machine unique id.
        :paramtype vm_id: str
        :keyword security_profile: The security profile.
        :paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
        :keyword location: Location of the VM from which the restore point was created.
        :paramtype location: str
        """
        super().__init__(**kwargs)
        # Plain pass-through of the captured VM property snapshot.
        self.location = location
        self.security_profile = security_profile
        self.vm_id = vm_id
        self.license_type = license_type
        self.diagnostics_profile = diagnostics_profile
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.hardware_profile = hardware_profile
class RestorePointSourceVMDataDisk(msrest.serialization.Model):
    """Description of a data disk captured at restore point creation.

    :ivar lun: The logical unit number.
    :vartype lun: int
    :ivar name: The disk name.
    :vartype name: str
    :ivar caching: The caching type. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar disk_size_gb: The initial disk size in GB for blank data disks, and the new desired size
     for existing OS and Data disks.
    :vartype disk_size_gb: int
    :ivar managed_disk: The managed disk details.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
    :ivar disk_restore_point: The disk restore point Id.
    :vartype disk_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    """

    _attribute_map = {
        "lun": {"key": "lun", "type": "int"},
        "name": {"key": "name", "type": "str"},
        "caching": {"key": "caching", "type": "str"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "managed_disk": {"key": "managedDisk", "type": "ManagedDiskParameters"},
        "disk_restore_point": {"key": "diskRestorePoint", "type": "ApiEntityReference"},
    }

    def __init__(
        self,
        *,
        lun: Optional[int] = None,
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        disk_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        """
        :keyword lun: The logical unit number.
        :paramtype lun: int
        :keyword name: The disk name.
        :paramtype name: str
        :keyword caching: The caching type. Possible values include: "None", "ReadOnly",
         "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword disk_size_gb: The initial disk size in GB for blank data disks, and the new
         desired size for existing OS and Data disks.
        :paramtype disk_size_gb: int
        :keyword managed_disk: The managed disk details.
        :paramtype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
        :keyword disk_restore_point: The disk restore point Id.
        :paramtype disk_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        """
        super().__init__(**kwargs)
        self.disk_restore_point = disk_restore_point
        self.managed_disk = managed_disk
        self.disk_size_gb = disk_size_gb
        self.caching = caching
        self.name = name
        self.lun = lun
class RestorePointSourceVMOSDisk(msrest.serialization.Model):
    """Description of an Operating System disk captured at restore point creation.

    :ivar os_type: The Operating System type. Possible values include: "Windows", "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemType
    :ivar encryption_settings: The disk encryption settings.
    :vartype encryption_settings: ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings
    :ivar name: The disk name.
    :vartype name: str
    :ivar caching: The caching type. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar disk_size_gb: The disk size in GB.
    :vartype disk_size_gb: int
    :ivar managed_disk: The managed disk details.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
    :ivar disk_restore_point: The disk restore point Id.
    :vartype disk_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    """

    _attribute_map = {
        "os_type": {"key": "osType", "type": "str"},
        "encryption_settings": {"key": "encryptionSettings", "type": "DiskEncryptionSettings"},
        "name": {"key": "name", "type": "str"},
        "caching": {"key": "caching", "type": "str"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "managed_disk": {"key": "managedDisk", "type": "ManagedDiskParameters"},
        "disk_restore_point": {"key": "diskRestorePoint", "type": "ApiEntityReference"},
    }

    def __init__(
        self,
        *,
        os_type: Optional[Union[str, "OperatingSystemType"]] = None,
        encryption_settings: Optional["DiskEncryptionSettings"] = None,
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        disk_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        """
        :keyword os_type: The Operating System type. Possible values include: "Windows", "Linux".
        :paramtype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemType
        :keyword encryption_settings: The disk encryption settings.
        :paramtype encryption_settings:
         ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSettings
        :keyword name: The disk name.
        :paramtype name: str
        :keyword caching: The caching type. Possible values include: "None", "ReadOnly",
         "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword disk_size_gb: The disk size in GB.
        :paramtype disk_size_gb: int
        :keyword managed_disk: The managed disk details.
        :paramtype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
        :keyword disk_restore_point: The disk restore point Id.
        :paramtype disk_restore_point: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        """
        super().__init__(**kwargs)
        self.disk_restore_point = disk_restore_point
        self.managed_disk = managed_disk
        self.disk_size_gb = disk_size_gb
        self.caching = caching
        self.name = name
        self.encryption_settings = encryption_settings
        self.os_type = os_type
class RestorePointSourceVMStorageProfile(msrest.serialization.Model):
    """Storage profile captured at restore point creation.

    :ivar os_disk: The OS disk of the VM captured at the time of the restore point creation.
    :vartype os_disk: ~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMOSDisk
    :ivar data_disks: The data disks of the VM captured at the time of the restore point creation.
    :vartype data_disks: list[~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMDataDisk]
    """

    _attribute_map = {
        "os_disk": {"key": "osDisk", "type": "RestorePointSourceVMOSDisk"},
        "data_disks": {"key": "dataDisks", "type": "[RestorePointSourceVMDataDisk]"},
    }

    def __init__(
        self,
        *,
        os_disk: Optional["RestorePointSourceVMOSDisk"] = None,
        data_disks: Optional[List["RestorePointSourceVMDataDisk"]] = None,
        **kwargs
    ):
        """
        :keyword os_disk: The OS disk of the VM captured at the time of the restore point creation.
        :paramtype os_disk: ~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMOSDisk
        :keyword data_disks: The data disks of the VM captured at the time of the restore point
         creation.
        :paramtype data_disks:
         list[~azure.mgmt.compute.v2022_03_01.models.RestorePointSourceVMDataDisk]
        """
        super().__init__(**kwargs)
        self.data_disks = data_disks
        self.os_disk = os_disk
class RetrieveBootDiagnosticsDataResult(msrest.serialization.Model):
    """SAS URIs for the console screenshot and serial log blobs.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar console_screenshot_blob_uri: The console screenshot blob URI.
    :vartype console_screenshot_blob_uri: str
    :ivar serial_console_log_blob_uri: The serial console log blob URI.
    :vartype serial_console_log_blob_uri: str
    """

    _validation = {
        "console_screenshot_blob_uri": {"readonly": True},
        "serial_console_log_blob_uri": {"readonly": True},
    }

    _attribute_map = {
        "console_screenshot_blob_uri": {"key": "consoleScreenshotBlobUri", "type": "str"},
        "serial_console_log_blob_uri": {"key": "serialConsoleLogBlobUri", "type": "str"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.serial_console_log_blob_uri = None
        self.console_screenshot_blob_uri = None
class RollbackStatusInfo(msrest.serialization.Model):
    """Information about rollback on failed VM instances after an OS Upgrade operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar successfully_rolledback_instance_count: The number of instances which have been
     successfully rolled back.
    :vartype successfully_rolledback_instance_count: int
    :ivar failed_rolledback_instance_count: The number of instances which failed to rollback.
    :vartype failed_rolledback_instance_count: int
    :ivar rollback_error: Error details if OS rollback failed.
    :vartype rollback_error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    _validation = {
        "successfully_rolledback_instance_count": {"readonly": True},
        "failed_rolledback_instance_count": {"readonly": True},
        "rollback_error": {"readonly": True},
    }

    _attribute_map = {
        "successfully_rolledback_instance_count": {"key": "successfullyRolledbackInstanceCount", "type": "int"},
        "failed_rolledback_instance_count": {"key": "failedRolledbackInstanceCount", "type": "int"},
        "rollback_error": {"key": "rollbackError", "type": "ApiError"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.rollback_error = None
        self.failed_rolledback_instance_count = None
        self.successfully_rolledback_instance_count = None
class RollingUpgradePolicy(msrest.serialization.Model):
    """Configuration parameters used while performing a rolling upgrade.

    :ivar max_batch_instance_percent: The maximum percent of total virtual machine instances that
     will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum,
     unhealthy instances in previous or future batches can cause the percentage of instances in a
     batch to decrease to ensure higher reliability. Defaults to 20%.
    :vartype max_batch_instance_percent: int
    :ivar max_unhealthy_instance_percent: The maximum percentage of the total virtual machine
     instances in the scale set that can be simultaneously unhealthy, either as a result of being
     upgraded, or by being found in an unhealthy state by the virtual machine health checks before
     the rolling upgrade aborts. This constraint will be checked prior to starting any batch.
     Defaults to 20%.
    :vartype max_unhealthy_instance_percent: int
    :ivar max_unhealthy_upgraded_instance_percent: The maximum percentage of upgraded virtual
     machine instances that can be found to be in an unhealthy state. This check will happen after
     each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts.
     Defaults to 20%.
    :vartype max_unhealthy_upgraded_instance_percent: int
    :ivar pause_time_between_batches: The wait time between completing the update for all virtual
     machines in one batch and starting the next batch, expressed in ISO 8601 duration format.
     Defaults to 0 seconds (PT0S).
    :vartype pause_time_between_batches: str
    :ivar enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when constructing upgrade
     batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine
     the batch size.
    :vartype enable_cross_zone_upgrade: bool
    :ivar prioritize_unhealthy_instances: Upgrade all unhealthy instances in a scale set before
     any healthy instances.
    :vartype prioritize_unhealthy_instances: bool
    """

    _validation = {
        "max_batch_instance_percent": {"maximum": 100, "minimum": 5},
        "max_unhealthy_instance_percent": {"maximum": 100, "minimum": 5},
        "max_unhealthy_upgraded_instance_percent": {"maximum": 100, "minimum": 0},
    }

    _attribute_map = {
        "max_batch_instance_percent": {"key": "maxBatchInstancePercent", "type": "int"},
        "max_unhealthy_instance_percent": {"key": "maxUnhealthyInstancePercent", "type": "int"},
        "max_unhealthy_upgraded_instance_percent": {"key": "maxUnhealthyUpgradedInstancePercent", "type": "int"},
        "pause_time_between_batches": {"key": "pauseTimeBetweenBatches", "type": "str"},
        "enable_cross_zone_upgrade": {"key": "enableCrossZoneUpgrade", "type": "bool"},
        "prioritize_unhealthy_instances": {"key": "prioritizeUnhealthyInstances", "type": "bool"},
    }

    def __init__(
        self,
        *,
        max_batch_instance_percent: Optional[int] = None,
        max_unhealthy_instance_percent: Optional[int] = None,
        max_unhealthy_upgraded_instance_percent: Optional[int] = None,
        pause_time_between_batches: Optional[str] = None,
        enable_cross_zone_upgrade: Optional[bool] = None,
        prioritize_unhealthy_instances: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword max_batch_instance_percent: The maximum percent of total virtual machine
         instances that will be upgraded simultaneously by the rolling upgrade in one batch. As
         this is a maximum, unhealthy instances in previous or future batches can cause the
         percentage of instances in a batch to decrease to ensure higher reliability. Defaults to
         20%.
        :paramtype max_batch_instance_percent: int
        :keyword max_unhealthy_instance_percent: The maximum percentage of the total virtual
         machine instances in the scale set that can be simultaneously unhealthy, either as a
         result of being upgraded, or by being found in an unhealthy state by the virtual machine
         health checks before the rolling upgrade aborts. This constraint will be checked prior to
         starting any batch. Defaults to 20%.
        :paramtype max_unhealthy_instance_percent: int
        :keyword max_unhealthy_upgraded_instance_percent: The maximum percentage of upgraded
         virtual machine instances that can be found to be in an unhealthy state. This check will
         happen after each batch is upgraded. If this percentage is ever exceeded, the rolling
         update aborts. Defaults to 20%.
        :paramtype max_unhealthy_upgraded_instance_percent: int
        :keyword pause_time_between_batches: The wait time between completing the update for all
         virtual machines in one batch and starting the next batch, expressed in ISO 8601 duration
         format. Defaults to 0 seconds (PT0S).
        :paramtype pause_time_between_batches: str
        :keyword enable_cross_zone_upgrade: Allow VMSS to ignore AZ boundaries when constructing
         upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to
         determine the batch size.
        :paramtype enable_cross_zone_upgrade: bool
        :keyword prioritize_unhealthy_instances: Upgrade all unhealthy instances in a scale set
         before any healthy instances.
        :paramtype prioritize_unhealthy_instances: bool
        """
        super().__init__(**kwargs)
        self.prioritize_unhealthy_instances = prioritize_unhealthy_instances
        self.enable_cross_zone_upgrade = enable_cross_zone_upgrade
        self.pause_time_between_batches = pause_time_between_batches
        self.max_unhealthy_upgraded_instance_percent = max_unhealthy_upgraded_instance_percent
        self.max_unhealthy_instance_percent = max_unhealthy_instance_percent
        self.max_batch_instance_percent = max_batch_instance_percent
class RollingUpgradeProgressInfo(msrest.serialization.Model):
    """Counts of virtual machine instances in each upgrade state.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar successful_instance_count: The number of instances that have been successfully upgraded.
    :vartype successful_instance_count: int
    :ivar failed_instance_count: The number of instances that have failed to be upgraded
     successfully.
    :vartype failed_instance_count: int
    :ivar in_progress_instance_count: The number of instances that are currently being upgraded.
    :vartype in_progress_instance_count: int
    :ivar pending_instance_count: The number of instances that have not yet begun to be upgraded.
    :vartype pending_instance_count: int
    """

    _validation = {
        "successful_instance_count": {"readonly": True},
        "failed_instance_count": {"readonly": True},
        "in_progress_instance_count": {"readonly": True},
        "pending_instance_count": {"readonly": True},
    }

    _attribute_map = {
        "successful_instance_count": {"key": "successfulInstanceCount", "type": "int"},
        "failed_instance_count": {"key": "failedInstanceCount", "type": "int"},
        "in_progress_instance_count": {"key": "inProgressInstanceCount", "type": "int"},
        "pending_instance_count": {"key": "pendingInstanceCount", "type": "int"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.pending_instance_count = None
        self.in_progress_instance_count = None
        self.failed_instance_count = None
        self.successful_instance_count = None
class RollingUpgradeRunningStatus(msrest.serialization.Model):
    """Current running state of the overall upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Code indicating the current status of the upgrade. Possible values include:
     "RollingForward", "Cancelled", "Completed", "Faulted".
    :vartype code: str or ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradeStatusCode
    :ivar start_time: Start time of the upgrade.
    :vartype start_time: ~datetime.datetime
    :ivar last_action: The last action performed on the rolling upgrade. Possible values include:
     "Start", "Cancel".
    :vartype last_action: str or ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradeActionType
    :ivar last_action_time: Last action time of the upgrade.
    :vartype last_action_time: ~datetime.datetime
    """

    _validation = {
        "code": {"readonly": True},
        "start_time": {"readonly": True},
        "last_action": {"readonly": True},
        "last_action_time": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "last_action": {"key": "lastAction", "type": "str"},
        "last_action_time": {"key": "lastActionTime", "type": "iso-8601"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.last_action_time = None
        self.last_action = None
        self.start_time = None
        self.code = None
class RollingUpgradeStatusInfo(Resource):
    """Status of the latest virtual machine scale set rolling upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar policy: The rolling upgrade policies applied for this upgrade.
    :vartype policy: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradePolicy
    :ivar running_status: Information about the current running state of the overall upgrade.
    :vartype running_status: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradeRunningStatus
    :ivar progress: Counts of virtual machine instances in each upgrade state.
    :vartype progress: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradeProgressInfo
    :ivar error: Error details for this upgrade, if there are any.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "policy": {"readonly": True},
        "running_status": {"readonly": True},
        "progress": {"readonly": True},
        "error": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "policy": {"key": "properties.policy", "type": "RollingUpgradePolicy"},
        "running_status": {"key": "properties.runningStatus", "type": "RollingUpgradeRunningStatus"},
        "progress": {"key": "properties.progress", "type": "RollingUpgradeProgressInfo"},
        "error": {"key": "properties.error", "type": "ApiError"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        # All status properties are read-only and populated by the service.
        self.error = None
        self.progress = None
        self.running_status = None
        self.policy = None
class RunCommandDocumentBase(msrest.serialization.Model):
    """Metadata describing a Run Command.

    All required parameters must be populated in order to send to Azure.

    :ivar schema: Required. The VM run command schema.
    :vartype schema: str
    :ivar id: Required. The VM run command id.
    :vartype id: str
    :ivar os_type: Required. The Operating System type. Possible values include: "Windows",
     "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    :ivar label: Required. The VM run command label.
    :vartype label: str
    :ivar description: Required. The VM run command description.
    :vartype description: str
    """

    _validation = {
        "schema": {"required": True},
        "id": {"required": True},
        "os_type": {"required": True},
        "label": {"required": True},
        "description": {"required": True},
    }

    _attribute_map = {
        "schema": {"key": "$schema", "type": "str"},
        "id": {"key": "id", "type": "str"},
        "os_type": {"key": "osType", "type": "str"},
        "label": {"key": "label", "type": "str"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,  # noqa: A002 -- name fixed by the service contract
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        **kwargs
    ):
        """
        :keyword schema: Required. The VM run command schema.
        :paramtype schema: str
        :keyword id: Required. The VM run command id.
        :paramtype id: str
        :keyword os_type: Required. The Operating System type. Possible values include: "Windows",
         "Linux".
        :paramtype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        :keyword label: Required. The VM run command label.
        :paramtype label: str
        :keyword description: Required. The VM run command description.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.description = description
        self.label = label
        self.os_type = os_type
        self.id = id
        self.schema = schema
class RunCommandDocument(RunCommandDocumentBase):
    """Describes the properties of a Run Command.

    All required parameters must be populated in order to send to Azure.

    :ivar schema: Required. The VM run command schema.
    :vartype schema: str
    :ivar id: Required. The VM run command id.
    :vartype id: str
    :ivar os_type: Required. The Operating System type. Possible values include: "Windows",
     "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    :ivar label: Required. The VM run command label.
    :vartype label: str
    :ivar description: Required. The VM run command description.
    :vartype description: str
    :ivar script: Required. The script to be executed.
    :vartype script: list[str]
    :ivar parameters: The parameters used by the script.
    :vartype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandParameterDefinition]
    """

    # Extends the base validation: the script body itself is mandatory here.
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
        'script': {'required': True},
    }

    # Attribute -> JSON key/type map; '[str]' and '[RunCommandParameterDefinition]'
    # are msrest's notation for JSON arrays of the given element type.
    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandParameterDefinition]'},
    }

    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        script: List[str],
        parameters: Optional[List["RunCommandParameterDefinition"]] = None,
        **kwargs
    ):
        """
        :keyword schema: Required. The VM run command schema.
        :paramtype schema: str
        :keyword id: Required. The VM run command id.
        :paramtype id: str
        :keyword os_type: Required. The Operating System type. Possible values include: "Windows",
         "Linux".
        :paramtype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        :keyword label: Required. The VM run command label.
        :paramtype label: str
        :keyword description: Required. The VM run command description.
        :paramtype description: str
        :keyword script: Required. The script to be executed.
        :paramtype script: list[str]
        :keyword parameters: The parameters used by the script.
        :paramtype parameters:
         list[~azure.mgmt.compute.v2022_03_01.models.RunCommandParameterDefinition]
        """
        # The base class stores the shared metadata fields; only the
        # script-specific fields are assigned here.
        super(RunCommandDocument, self).__init__(schema=schema, id=id, os_type=os_type, label=label, description=description, **kwargs)
        self.script = script
        self.parameters = parameters
class RunCommandInput(msrest.serialization.Model):
    """Capture Virtual Machine parameters.

    All required parameters must be populated in order to send to Azure.

    :ivar command_id: Required. The run command id.
    :vartype command_id: str
    :ivar script: Optional. The script to be executed. When this value is given, the given script
     will override the default script of the command.
    :vartype script: list[str]
    :ivar parameters: The run command parameters.
    :vartype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
    """

    # Only the command id is mandatory; script and parameters are optional overrides.
    _validation = {
        'command_id': {'required': True},
    }

    # Attribute -> JSON wire key/type mapping consumed by msrest.
    _attribute_map = {
        'command_id': {'key': 'commandId', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandInputParameter]'},
    }

    def __init__(self, *, command_id: str, script: Optional[List[str]] = None,
                 parameters: Optional[List["RunCommandInputParameter"]] = None, **kwargs):
        """
        :keyword command_id: Required. The run command id.
        :paramtype command_id: str
        :keyword script: Optional. The script to be executed. When this value is given, the given
         script will override the default script of the command.
        :paramtype script: list[str]
        :keyword parameters: The run command parameters.
        :paramtype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
        """
        super(RunCommandInput, self).__init__(**kwargs)
        self.parameters = parameters
        self.script = script
        self.command_id = command_id
class RunCommandInputParameter(msrest.serialization.Model):
    """A single name/value pair supplied as a run command parameter.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. The run command parameter name.
    :vartype name: str
    :ivar value: Required. The run command parameter value.
    :vartype value: str
    """

    # Both fields are mandatory in the REST payload.
    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: str, value: str, **kwargs):
        """
        :keyword name: Required. The run command parameter name.
        :paramtype name: str
        :keyword value: Required. The run command parameter value.
        :paramtype value: str
        """
        super(RunCommandInputParameter, self).__init__(**kwargs)
        self.value = value
        self.name = name
class RunCommandListResult(msrest.serialization.Model):
    """The List Virtual Machine operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of virtual machine run commands.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandDocumentBase]
    :ivar next_link: The uri to fetch the next page of run commands. Call ListNext() with this to
     fetch the next page of run commands.
    :vartype next_link: str
    """

    # A page must always carry its items; next_link is absent on the last page.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[RunCommandDocumentBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["RunCommandDocumentBase"],
                 next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of virtual machine run commands.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandDocumentBase]
        :keyword next_link: The uri to fetch the next page of run commands. Call ListNext() with
         this to fetch the next page of run commands.
        :paramtype next_link: str
        """
        super(RunCommandListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class RunCommandParameterDefinition(msrest.serialization.Model):
    """Describes the properties of a run command parameter.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. The run command parameter name.
    :vartype name: str
    :ivar type: Required. The run command parameter type.
    :vartype type: str
    :ivar default_value: The run command parameter default value.
    :vartype default_value: str
    :ivar required: The run command parameter required.
    :vartype required: bool
    """

    # name/type identify the parameter; default_value and required are optional metadata.
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'default_value': {'key': 'defaultValue', 'type': 'str'},
        'required': {'key': 'required', 'type': 'bool'},
    }

    def __init__(self, *, name: str, type: str, default_value: Optional[str] = None,
                 required: Optional[bool] = False, **kwargs):
        """
        :keyword name: Required. The run command parameter name.
        :paramtype name: str
        :keyword type: Required. The run command parameter type.
        :paramtype type: str
        :keyword default_value: The run command parameter default value.
        :paramtype default_value: str
        :keyword required: The run command parameter required.
        :paramtype required: bool
        """
        super(RunCommandParameterDefinition, self).__init__(**kwargs)
        self.required = required
        self.default_value = default_value
        self.type = type
        self.name = name
class RunCommandResult(msrest.serialization.Model):
    """RunCommandResult.

    :ivar value: Run command operation response.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    # Single optional payload: the list of instance-view statuses produced
    # by the run command operation.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, value: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :keyword value: Run command operation response.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super(RunCommandResult, self).__init__(**kwargs)
        self.value = value
class ScaleInPolicy(msrest.serialization.Model):
    """Describes a scale-in policy for a virtual machine scale set.

    :ivar rules: The rules to be followed when scaling-in a virtual machine scale set.
     :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **Default** When a
     virtual machine scale set is scaled in, the scale set will first be balanced across zones if it
     is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within
     each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not
     protected from scale-in. :code:`<br>`:code:`<br>` **OldestVM** When a virtual machine scale set
     is being scaled-in, the oldest virtual machines that are not protected from scale-in will be
     chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced
     across zones. Within each zone, the oldest virtual machines that are not protected will be
     chosen for removal. :code:`<br>`:code:`<br>` **NewestVM** When a virtual machine scale set is
     being scaled-in, the newest virtual machines that are not protected from scale-in will be
     chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced
     across zones. Within each zone, the newest virtual machines that are not protected will be
     chosen for removal. :code:`<br>`:code:`<br>`.
    :vartype rules: list[str or
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetScaleInRules]
    :ivar force_deletion: This property allows you to specify if virtual machines chosen for
     removal have to be force deleted when a virtual machine scale set is being scaled-in.(Feature
     in Preview).
    :vartype force_deletion: bool
    """

    # Both fields are optional; 'rules' serializes as a plain string array on the wire
    # even though callers may pass VirtualMachineScaleSetScaleInRules enum members.
    _attribute_map = {
        'rules': {'key': 'rules', 'type': '[str]'},
        'force_deletion': {'key': 'forceDeletion', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        rules: Optional[List[Union[str, "VirtualMachineScaleSetScaleInRules"]]] = None,
        force_deletion: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword rules: The rules to be followed when scaling-in a virtual machine scale set.
         :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **Default** When a
         virtual machine scale set is scaled in, the scale set will first be balanced across zones if it
         is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within
         each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not
         protected from scale-in. :code:`<br>`:code:`<br>` **OldestVM** When a virtual machine scale set
         is being scaled-in, the oldest virtual machines that are not protected from scale-in will be
         chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced
         across zones. Within each zone, the oldest virtual machines that are not protected will be
         chosen for removal. :code:`<br>`:code:`<br>` **NewestVM** When a virtual machine scale set is
         being scaled-in, the newest virtual machines that are not protected from scale-in will be
         chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced
         across zones. Within each zone, the newest virtual machines that are not protected will be
         chosen for removal. :code:`<br>`:code:`<br>`.
        :paramtype rules: list[str or
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetScaleInRules]
        :keyword force_deletion: This property allows you to specify if virtual machines chosen for
         removal have to be force deleted when a virtual machine scale set is being scaled-in.(Feature
         in Preview).
        :paramtype force_deletion: bool
        """
        super(ScaleInPolicy, self).__init__(**kwargs)
        self.rules = rules
        self.force_deletion = force_deletion
class ScheduledEventsProfile(msrest.serialization.Model):
    """ScheduledEventsProfile.

    :ivar terminate_notification_profile: Specifies Terminate Scheduled Event related
     configurations.
    :vartype terminate_notification_profile:
     ~azure.mgmt.compute.v2022_03_01.models.TerminateNotificationProfile
    """

    # Wrapper model: carries only the optional terminate-notification settings.
    _attribute_map = {
        'terminate_notification_profile': {'key': 'terminateNotificationProfile', 'type': 'TerminateNotificationProfile'},
    }

    def __init__(self, *,
                 terminate_notification_profile: Optional["TerminateNotificationProfile"] = None,
                 **kwargs):
        """
        :keyword terminate_notification_profile: Specifies Terminate Scheduled Event related
         configurations.
        :paramtype terminate_notification_profile:
         ~azure.mgmt.compute.v2022_03_01.models.TerminateNotificationProfile
        """
        super(ScheduledEventsProfile, self).__init__(**kwargs)
        self.terminate_notification_profile = terminate_notification_profile
class SecurityProfile(msrest.serialization.Model):
    """Specifies the Security profile settings for the virtual machine or virtual machine scale set.

    :ivar uefi_settings: Specifies the security settings like secure boot and vTPM used while
     creating the virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
    :vartype uefi_settings: ~azure.mgmt.compute.v2022_03_01.models.UefiSettings
    :ivar encryption_at_host: This property can be used by user in the request to enable or disable
     the Host Encryption for the virtual machine or virtual machine scale set. This will enable the
     encryption for all the disks including Resource/Temp disk at host itself.
     :code:`<br>`:code:`<br>` Default: The Encryption at host will be disabled unless this property
     is set to true for the resource.
    :vartype encryption_at_host: bool
    :ivar security_type: Specifies the SecurityType of the virtual machine. It has to be set to any
     specified value to enable UefiSettings. :code:`<br>`:code:`<br>` Default: UefiSettings will not
     be enabled unless this property is set. Possible values include: "TrustedLaunch",
     "ConfidentialVM".
    :vartype security_type: str or ~azure.mgmt.compute.v2022_03_01.models.SecurityTypes
    """

    # All fields optional; security_type serializes as a plain string even when a
    # SecurityTypes enum member is supplied.
    _attribute_map = {
        'uefi_settings': {'key': 'uefiSettings', 'type': 'UefiSettings'},
        'encryption_at_host': {'key': 'encryptionAtHost', 'type': 'bool'},
        'security_type': {'key': 'securityType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        uefi_settings: Optional["UefiSettings"] = None,
        encryption_at_host: Optional[bool] = None,
        security_type: Optional[Union[str, "SecurityTypes"]] = None,
        **kwargs
    ):
        """
        :keyword uefi_settings: Specifies the security settings like secure boot and vTPM used while
         creating the virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
        :paramtype uefi_settings: ~azure.mgmt.compute.v2022_03_01.models.UefiSettings
        :keyword encryption_at_host: This property can be used by user in the request to enable or
         disable the Host Encryption for the virtual machine or virtual machine scale set. This will
         enable the encryption for all the disks including Resource/Temp disk at host itself.
         :code:`<br>`:code:`<br>` Default: The Encryption at host will be disabled unless this property
         is set to true for the resource.
        :paramtype encryption_at_host: bool
        :keyword security_type: Specifies the SecurityType of the virtual machine. It has to be set to
         any specified value to enable UefiSettings. :code:`<br>`:code:`<br>` Default: UefiSettings will
         not be enabled unless this property is set. Possible values include: "TrustedLaunch",
         "ConfidentialVM".
        :paramtype security_type: str or ~azure.mgmt.compute.v2022_03_01.models.SecurityTypes
        """
        super(SecurityProfile, self).__init__(**kwargs)
        self.uefi_settings = uefi_settings
        self.encryption_at_host = encryption_at_host
        self.security_type = security_type
class Sku(msrest.serialization.Model):
    """Describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the hardware the scale set is currently on, you need to deallocate the VMs in the scale set before you modify the SKU name.

    :ivar name: The sku name.
    :vartype name: str
    :ivar tier: Specifies the tier of virtual machines in a scale set.:code:`<br />`:code:`<br />`
     Possible Values::code:`<br />`:code:`<br />` **Standard**\ :code:`<br />`:code:`<br />`
     **Basic**.
    :vartype tier: str
    :ivar capacity: Specifies the number of virtual machines in the scale set.
    :vartype capacity: long
    """

    # 'long' is the msrest wire type for a 64-bit integer; the Python value is an int.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'long'},
    }

    def __init__(self, *, name: Optional[str] = None, tier: Optional[str] = None,
                 capacity: Optional[int] = None, **kwargs):
        """
        :keyword name: The sku name.
        :paramtype name: str
        :keyword tier: Specifies the tier of virtual machines in a scale set.:code:`<br />`:code:`<br
         />` Possible Values::code:`<br />`:code:`<br />` **Standard**\ :code:`<br />`:code:`<br />`
         **Basic**.
        :paramtype tier: str
        :keyword capacity: Specifies the number of virtual machines in the scale set.
        :paramtype capacity: long
        """
        super(Sku, self).__init__(**kwargs)
        self.capacity = capacity
        self.tier = tier
        self.name = name
class SpotRestorePolicy(msrest.serialization.Model):
    """Specifies the Spot-Try-Restore properties for the virtual machine scale set. :code:`<br>`:code:`<br>` With this property customer can enable or disable automatic restore of the evicted Spot VMSS VM instances opportunistically based on capacity availability and pricing constraint.

    :ivar enabled: Enables the Spot-Try-Restore feature where evicted VMSS SPOT instances will be
     tried to be restored opportunistically based on capacity availability and pricing constraints.
    :vartype enabled: bool
    :ivar restore_timeout: Timeout value expressed as an ISO 8601 time duration after which the
     platform will not try to restore the VMSS SPOT instances.
    :vartype restore_timeout: str
    """

    # restore_timeout is an ISO 8601 duration string (e.g. "PT1H"), not a number.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'restore_timeout': {'key': 'restoreTimeout', 'type': 'str'},
    }

    def __init__(self, *, enabled: Optional[bool] = None,
                 restore_timeout: Optional[str] = None, **kwargs):
        """
        :keyword enabled: Enables the Spot-Try-Restore feature where evicted VMSS SPOT instances will
         be tried to be restored opportunistically based on capacity availability and pricing
         constraints.
        :paramtype enabled: bool
        :keyword restore_timeout: Timeout value expressed as an ISO 8601 time duration after which the
         platform will not try to restore the VMSS SPOT instances.
        :paramtype restore_timeout: str
        """
        super(SpotRestorePolicy, self).__init__(**kwargs)
        self.restore_timeout = restore_timeout
        self.enabled = enabled
class SshConfiguration(msrest.serialization.Model):
    """SSH configuration for Linux based VMs running on Azure.

    :ivar public_keys: The list of SSH public keys used to authenticate with linux based VMs.
    :vartype public_keys: list[~azure.mgmt.compute.v2022_03_01.models.SshPublicKey]
    """

    # Single optional field: the set of SSH public keys provisioned onto the VM.
    _attribute_map = {
        'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'},
    }

    def __init__(self, *, public_keys: Optional[List["SshPublicKey"]] = None, **kwargs):
        """
        :keyword public_keys: The list of SSH public keys used to authenticate with linux based VMs.
        :paramtype public_keys: list[~azure.mgmt.compute.v2022_03_01.models.SshPublicKey]
        """
        super(SshConfiguration, self).__init__(**kwargs)
        self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
    """Contains information about SSH certificate public key and the path on the Linux VM where the public key is placed.

    :ivar path: Specifies the full path on the created VM where ssh public key is stored. If the
     file already exists, the specified key is appended to the file. Example:
     /home/user/.ssh/authorized_keys.
    :vartype path: str
    :ivar key_data: SSH public key certificate used to authenticate with the VM through ssh. The
     key needs to be at least 2048-bit and in ssh-rsa format. :code:`<br>`:code:`<br>` For creating
     ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in
     Azure](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed).
    :vartype key_data: str
    """

    # Both fields are optional on the wire; path and keyData travel as plain strings.
    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        path: Optional[str] = None,
        key_data: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword path: Specifies the full path on the created VM where ssh public key is stored. If the
         file already exists, the specified key is appended to the file. Example:
         /home/user/.ssh/authorized_keys.
        :paramtype path: str
        :keyword key_data: SSH public key certificate used to authenticate with the VM through ssh. The
         key needs to be at least 2048-bit and in ssh-rsa format. :code:`<br>`:code:`<br>` For creating
         ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in
         Azure](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed).
        :paramtype key_data: str
        """
        super(SshPublicKey, self).__init__(**kwargs)
        self.path = path
        self.key_data = key_data
class SshPublicKeyGenerateKeyPairResult(msrest.serialization.Model):
    """Response from generation of an SSH key pair.

    All required parameters must be populated in order to send to Azure.

    :ivar private_key: Required. Private key portion of the key pair used to authenticate to a
     virtual machine through ssh. The private key is returned in RFC3447 format and should be
     treated as a secret.
    :vartype private_key: str
    :ivar public_key: Required. Public key portion of the key pair used to authenticate to a
     virtual machine through ssh. The public key is in ssh-rsa format.
    :vartype public_key: str
    :ivar id: Required. The ARM resource id in the form of
     /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{SshPublicKeyName}.
    :vartype id: str
    """

    # Every field of this response is mandatory.
    _validation = {
        'private_key': {'required': True},
        'public_key': {'required': True},
        'id': {'required': True},
    }

    _attribute_map = {
        'private_key': {'key': 'privateKey', 'type': 'str'},
        'public_key': {'key': 'publicKey', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, private_key: str, public_key: str, id: str, **kwargs):
        """
        :keyword private_key: Required. Private key portion of the key pair used to authenticate to a
         virtual machine through ssh. The private key is returned in RFC3447 format and should be
         treated as a secret.
        :paramtype private_key: str
        :keyword public_key: Required. Public key portion of the key pair used to authenticate to a
         virtual machine through ssh. The public key is in ssh-rsa format.
        :paramtype public_key: str
        :keyword id: Required. The ARM resource id in the form of
         /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{SshPublicKeyName}.
        :paramtype id: str
        """
        super(SshPublicKeyGenerateKeyPairResult, self).__init__(**kwargs)
        self.id = id
        self.public_key = public_key
        self.private_key = private_key
class SshPublicKeyResource(Resource):
    """Specifies information about the SSH public key.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar public_key: SSH public key used to authenticate to a virtual machine through ssh. If this
     property is not initially provided when the resource is created, the publicKey property will be
     populated when generateKeyPair is called. If the public key is provided upon resource creation,
     the provided public key needs to be at least 2048-bit and in ssh-rsa format.
    :vartype public_key: str
    """

    # id/name/type are server-assigned (readonly); only location must be supplied.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # 'properties.publicKey' flattens the nested ARM "properties" envelope onto
    # this model's public_key attribute.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'public_key': {'key': 'properties.publicKey', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        public_key: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword public_key: SSH public key used to authenticate to a virtual machine through ssh. If
         this property is not initially provided when the resource is created, the publicKey property
         will be populated when generateKeyPair is called. If the public key is provided upon resource
         creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format.
        :paramtype public_key: str
        """
        # Resource handles the common ARM fields (location, tags).
        super(SshPublicKeyResource, self).__init__(location=location, tags=tags, **kwargs)
        self.public_key = public_key
class SshPublicKeysGroupListResult(msrest.serialization.Model):
    """The list SSH public keys operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of SSH public keys.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.SshPublicKeyResource]
    :ivar next_link: The URI to fetch the next page of SSH public keys. Call ListNext() with this
     URI to fetch the next page of SSH public keys.
    :vartype next_link: str
    """

    # A page always has items; next_link is absent on the final page.
    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SshPublicKeyResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["SshPublicKeyResource"],
                 next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of SSH public keys.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.SshPublicKeyResource]
        :keyword next_link: The URI to fetch the next page of SSH public keys. Call ListNext() with
         this URI to fetch the next page of SSH public keys.
        :paramtype next_link: str
        """
        super(SshPublicKeysGroupListResult, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class SshPublicKeyUpdateResource(UpdateResource):
    """Specifies information about the SSH public key.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar public_key: SSH public key used to authenticate to a virtual machine through ssh. If this
     property is not initially provided when the resource is created, the publicKey property will be
     populated when generateKeyPair is called. If the public key is provided upon resource creation,
     the provided public key needs to be at least 2048-bit and in ssh-rsa format.
    :vartype public_key: str
    """

    # 'properties.publicKey' flattens the nested ARM "properties" object onto public_key.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'public_key': {'key': 'properties.publicKey', 'type': 'str'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 public_key: Optional[str] = None, **kwargs):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword public_key: SSH public key used to authenticate to a virtual machine through ssh. If
         this property is not initially provided when the resource is created, the publicKey property
         will be populated when generateKeyPair is called. If the public key is provided upon resource
         creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format.
        :paramtype public_key: str
        """
        # UpdateResource stores the tags; only public_key is handled here.
        super(SshPublicKeyUpdateResource, self).__init__(tags=tags, **kwargs)
        self.public_key = public_key
class StorageProfile(msrest.serialization.Model):
    """Specifies the storage settings for the virtual machine disks.

    :ivar image_reference: Specifies information about the image to use. You can specify
     information about platform images, marketplace images, or virtual machine images. This element
     is required when you want to use a platform image, marketplace image, or virtual machine image,
     but is not used in other creation operations.
    :vartype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
    :ivar os_disk: Specifies information about the operating system disk used by the virtual
     machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs
     for Azure virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
    :vartype os_disk: ~azure.mgmt.compute.v2022_03_01.models.OSDisk
    :ivar data_disks: Specifies the parameters that are used to add a data disk to a virtual
     machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs
     for Azure virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
    :vartype data_disks: list[~azure.mgmt.compute.v2022_03_01.models.DataDisk]
    """

    # All three sections are optional; which ones are required depends on the
    # creation operation (see the :ivar: docs above).
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["OSDisk"] = None,
        data_disks: Optional[List["DataDisk"]] = None,
        **kwargs
    ):
        """
        :keyword image_reference: Specifies information about the image to use. You can specify
         information about platform images, marketplace images, or virtual machine images. This element
         is required when you want to use a platform image, marketplace image, or virtual machine image,
         but is not used in other creation operations.
        :paramtype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
        :keyword os_disk: Specifies information about the operating system disk used by the virtual
         machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs
         for Azure virtual machines
         <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
        :paramtype os_disk: ~azure.mgmt.compute.v2022_03_01.models.OSDisk
        :keyword data_disks: Specifies the parameters that are used to add a data disk to a virtual
         machine. :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs
         for Azure virtual machines
         <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
        :paramtype data_disks: list[~azure.mgmt.compute.v2022_03_01.models.DataDisk]
        """
        super(StorageProfile, self).__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class SubResourceReadOnly(msrest.serialization.Model):
    """SubResourceReadOnly.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    """

    # The id is server-assigned; clients never send it.
    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        """
        super(SubResourceReadOnly, self).__init__(**kwargs)
        # Populated by the service on deserialization; always starts as None.
        self.id = None
class SubResourceWithColocationStatus(SubResource):
    """SubResourceWithColocationStatus.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar colocation_status: Describes colocation status of a resource in the Proximity Placement
     Group.
    :vartype colocation_status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'colocation_status': {'key': 'colocationStatus', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, *, id: Optional[str] = None,
                 colocation_status: Optional["InstanceViewStatus"] = None, **kwargs):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword colocation_status: Describes colocation status of a resource in the Proximity
         Placement Group.
        :paramtype colocation_status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
        """
        # SubResource stores the id; only the colocation status is added here.
        super(SubResourceWithColocationStatus, self).__init__(id=id, **kwargs)
        self.colocation_status = colocation_status
class TerminateNotificationProfile(msrest.serialization.Model):
    """TerminateNotificationProfile.

    :ivar not_before_timeout: Configurable length of time a Virtual Machine being deleted will have
     to potentially approve the Terminate Scheduled Event before the event is auto approved (timed
     out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes
     (PT5M).
    :vartype not_before_timeout: str
    :ivar enable: Specifies whether the Terminate Scheduled event is enabled or disabled.
    :vartype enable: bool
    """

    # not_before_timeout is an ISO 8601 duration string (service default PT5M).
    _attribute_map = {
        'not_before_timeout': {'key': 'notBeforeTimeout', 'type': 'str'},
        'enable': {'key': 'enable', 'type': 'bool'},
    }

    def __init__(self, *, not_before_timeout: Optional[str] = None,
                 enable: Optional[bool] = None, **kwargs):
        """
        :keyword not_before_timeout: Configurable length of time a Virtual Machine being deleted will
         have to potentially approve the Terminate Scheduled Event before the event is auto approved
         (timed out). The configuration must be specified in ISO 8601 format, the default value is 5
         minutes (PT5M).
        :paramtype not_before_timeout: str
        :keyword enable: Specifies whether the Terminate Scheduled event is enabled or disabled.
        :paramtype enable: bool
        """
        super(TerminateNotificationProfile, self).__init__(**kwargs)
        self.enable = enable
        self.not_before_timeout = not_before_timeout
class ThrottledRequestsInput(LogAnalyticsInputBase):
    """Api request input for LogAnalytics getThrottledRequests Api.

    All required parameters must be populated in order to send to Azure.

    :ivar blob_container_sas_uri: Required. SAS Uri of the logging blob container to which
     LogAnalytics Api writes output logs to.
    :vartype blob_container_sas_uri: str
    :ivar from_time: Required. From time of the query.
    :vartype from_time: ~datetime.datetime
    :ivar to_time: Required. To time of the query.
    :vartype to_time: ~datetime.datetime
    :ivar group_by_throttle_policy: Group query result by Throttle Policy applied.
    :vartype group_by_throttle_policy: bool
    :ivar group_by_operation_name: Group query result by Operation Name.
    :vartype group_by_operation_name: bool
    :ivar group_by_resource_name: Group query result by Resource Name.
    :vartype group_by_resource_name: bool
    :ivar group_by_client_application_id: Group query result by Client Application ID.
    :vartype group_by_client_application_id: bool
    :ivar group_by_user_agent: Group query result by User Agent.
    :vartype group_by_user_agent: bool
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'group_by_client_application_id': {'key': 'groupByClientApplicationId', 'type': 'bool'},
        'group_by_user_agent': {'key': 'groupByUserAgent', 'type': 'bool'},
    }

    def __init__(
        self, *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword blob_container_sas_uri: Required. SAS Uri of the logging blob container to which
         LogAnalytics Api writes output logs to.
        :paramtype blob_container_sas_uri: str
        :keyword from_time: Required. From time of the query.
        :paramtype from_time: ~datetime.datetime
        :keyword to_time: Required. To time of the query.
        :paramtype to_time: ~datetime.datetime
        :keyword group_by_throttle_policy: Group query result by Throttle Policy applied.
        :paramtype group_by_throttle_policy: bool
        :keyword group_by_operation_name: Group query result by Operation Name.
        :paramtype group_by_operation_name: bool
        :keyword group_by_resource_name: Group query result by Resource Name.
        :paramtype group_by_resource_name: bool
        :keyword group_by_client_application_id: Group query result by Client Application ID.
        :paramtype group_by_client_application_id: bool
        :keyword group_by_user_agent: Group query result by User Agent.
        :paramtype group_by_user_agent: bool
        """
        # All fields are stored by LogAnalyticsInputBase; this subclass adds no state of its own.
        super().__init__(
            blob_container_sas_uri=blob_container_sas_uri,
            from_time=from_time,
            to_time=to_time,
            group_by_throttle_policy=group_by_throttle_policy,
            group_by_operation_name=group_by_operation_name,
            group_by_resource_name=group_by_resource_name,
            group_by_client_application_id=group_by_client_application_id,
            group_by_user_agent=group_by_user_agent,
            **kwargs
        )
class UefiSettings(msrest.serialization.Model):
    """Specifies the security settings like secure boot and vTPM used while creating the virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.

    :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual
     machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
    :vartype secure_boot_enabled: bool
    :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine.
     :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
    :vartype v_tpm_enabled: bool
    """

    _attribute_map = {
        'secure_boot_enabled': {'key': 'secureBootEnabled', 'type': 'bool'},
        'v_tpm_enabled': {'key': 'vTpmEnabled', 'type': 'bool'},
    }

    def __init__(self, *, secure_boot_enabled: Optional[bool] = None, v_tpm_enabled: Optional[bool] = None, **kwargs):
        """
        :keyword secure_boot_enabled: Whether secure boot should be enabled on the virtual machine.
         Minimum api-version: 2020-12-01.
        :paramtype secure_boot_enabled: bool
        :keyword v_tpm_enabled: Whether vTPM should be enabled on the virtual machine.
         Minimum api-version: 2020-12-01.
        :paramtype v_tpm_enabled: bool
        """
        super().__init__(**kwargs)
        self.secure_boot_enabled = secure_boot_enabled
        self.v_tpm_enabled = v_tpm_enabled
class UpgradeOperationHistoricalStatusInfo(msrest.serialization.Model):
    """Virtual Machine Scale Set OS Upgrade History operation response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar properties: Information about the properties of the upgrade operation.
    :vartype properties:
     ~azure.mgmt.compute.v2022_03_01.models.UpgradeOperationHistoricalStatusInfoProperties
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    """

    _validation = {
        'properties': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'UpgradeOperationHistoricalStatusInfoProperties'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; every field is read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.properties = None
        self.type = None
        self.location = None
class UpgradeOperationHistoricalStatusInfoProperties(msrest.serialization.Model):
    """Describes each OS upgrade on the Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar running_status: Information about the overall status of the upgrade operation.
    :vartype running_status: ~azure.mgmt.compute.v2022_03_01.models.UpgradeOperationHistoryStatus
    :ivar progress: Counts of the VMs in each state.
    :vartype progress: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradeProgressInfo
    :ivar error: Error Details for this upgrade if there are any.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    :ivar started_by: Invoker of the Upgrade Operation. Possible values include: "Unknown", "User",
     "Platform".
    :vartype started_by: str or ~azure.mgmt.compute.v2022_03_01.models.UpgradeOperationInvoker
    :ivar target_image_reference: Image Reference details.
    :vartype target_image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
    :ivar rollback_info: Information about OS rollback if performed.
    :vartype rollback_info: ~azure.mgmt.compute.v2022_03_01.models.RollbackStatusInfo
    """

    _validation = {
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
        'started_by': {'readonly': True},
        'target_image_reference': {'readonly': True},
        'rollback_info': {'readonly': True},
    }

    _attribute_map = {
        'running_status': {'key': 'runningStatus', 'type': 'UpgradeOperationHistoryStatus'},
        'progress': {'key': 'progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'error', 'type': 'ApiError'},
        'started_by': {'key': 'startedBy', 'type': 'str'},
        'target_image_reference': {'key': 'targetImageReference', 'type': 'ImageReference'},
        'rollback_info': {'key': 'rollbackInfo', 'type': 'RollbackStatusInfo'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; every field is read-only and filled in by the service."""
        super().__init__(**kwargs)
        # All attributes are server-populated, so they all start out as None.
        for attr in ('running_status', 'progress', 'error', 'started_by',
                     'target_image_reference', 'rollback_info'):
            setattr(self, attr, None)
class UpgradeOperationHistoryStatus(msrest.serialization.Model):
    """Information about the current running state of the overall upgrade.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Code indicating the current status of the upgrade. Possible values include:
     "RollingForward", "Cancelled", "Completed", "Faulted".
    :vartype code: str or ~azure.mgmt.compute.v2022_03_01.models.UpgradeState
    :ivar start_time: Start time of the upgrade.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: End time of the upgrade.
    :vartype end_time: ~datetime.datetime
    """

    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; every field is read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.end_time = None
class UpgradePolicy(msrest.serialization.Model):
    """Describes an upgrade policy - automatic, manual, or rolling.

    :ivar mode: Specifies the mode of an upgrade to virtual machines in the scale set.:code:`<br
     />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You  control
     the application of updates to virtual machines in the scale set. You do this by using the
     manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the
     scale set are  automatically updated at the same time. Possible values include: "Automatic",
     "Manual", "Rolling".
    :vartype mode: str or ~azure.mgmt.compute.v2022_03_01.models.UpgradeMode
    :ivar rolling_upgrade_policy: The configuration parameters used while performing a rolling
     upgrade.
    :vartype rolling_upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradePolicy
    :ivar automatic_os_upgrade_policy: Configuration parameters used for performing automatic OS
     Upgrade.
    :vartype automatic_os_upgrade_policy:
     ~azure.mgmt.compute.v2022_03_01.models.AutomaticOSUpgradePolicy
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'},
        'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'},
    }

    def __init__(
        self, *,
        mode: Optional[Union[str, "UpgradeMode"]] = None,
        rolling_upgrade_policy: Optional["RollingUpgradePolicy"] = None,
        automatic_os_upgrade_policy: Optional["AutomaticOSUpgradePolicy"] = None,
        **kwargs
    ):
        """
        :keyword mode: Upgrade mode for the scale set VMs. **Manual** - updates are applied by the
         caller via the manualUpgrade action. **Automatic** - all VMs in the scale set are updated
         at the same time. Possible values include: "Automatic", "Manual", "Rolling".
        :paramtype mode: str or ~azure.mgmt.compute.v2022_03_01.models.UpgradeMode
        :keyword rolling_upgrade_policy: The configuration parameters used while performing a
         rolling upgrade.
        :paramtype rolling_upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.RollingUpgradePolicy
        :keyword automatic_os_upgrade_policy: Configuration parameters used for performing automatic
         OS Upgrade.
        :paramtype automatic_os_upgrade_policy:
         ~azure.mgmt.compute.v2022_03_01.models.AutomaticOSUpgradePolicy
        """
        super().__init__(**kwargs)
        self.mode = mode
        self.rolling_upgrade_policy = rolling_upgrade_policy
        self.automatic_os_upgrade_policy = automatic_os_upgrade_policy
class Usage(msrest.serialization.Model):
    """Describes Compute Resource Usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar unit: An enum describing the unit of usage measurement. Has constant value: "Count".
    :vartype unit: str
    :ivar current_value: Required. The current usage of the resource.
    :vartype current_value: int
    :ivar limit: Required. The maximum permitted usage of the resource.
    :vartype limit: long
    :ivar name: Required. The name of the type of usage.
    :vartype name: ~azure.mgmt.compute.v2022_03_01.models.UsageName
    """

    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # The service defines a single, constant unit for usage measurement.
    unit = "Count"

    def __init__(self, *, current_value: int, limit: int, name: "UsageName", **kwargs):
        """
        :keyword current_value: Required. The current usage of the resource.
        :paramtype current_value: int
        :keyword limit: Required. The maximum permitted usage of the resource.
        :paramtype limit: long
        :keyword name: Required. The name of the type of usage.
        :paramtype name: ~azure.mgmt.compute.v2022_03_01.models.UsageName
        """
        super().__init__(**kwargs)
        self.current_value = current_value
        self.limit = limit
        self.name = name
class UsageName(msrest.serialization.Model):
    """The Usage Names.

    :ivar value: The name of the resource.
    :vartype value: str
    :ivar localized_value: The localized name of the resource.
    :vartype localized_value: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None, **kwargs):
        """
        :keyword value: The name of the resource.
        :paramtype value: str
        :keyword localized_value: The localized name of the resource.
        :paramtype localized_value: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class UserAssignedIdentitiesValue(msrest.serialization.Model):
    """Identifiers of a user assigned identity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; both ids are read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.principal_id = None
        self.client_id = None
class VaultCertificate(msrest.serialization.Model):
    """Describes a single certificate reference in a Key Vault, and where the certificate should reside on the VM.

    :ivar certificate_url: This is the URL of a certificate that has been uploaded to Key Vault as
     a secret. For adding a secret to the Key Vault, see `Add a key or secret to the key vault
     <https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add>`_. In this case, your
     certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded
     in UTF-8: :code:`<br>`:code:`<br>` {:code:`<br>`
     "data":":code:`<Base64-encoded-certificate>`",:code:`<br>`  "dataType":"pfx",:code:`<br>`
     "password":":code:`<pfx-file-password>`":code:`<br>`} :code:`<br>` To install certificates on a
     virtual machine it is recommended to use the `Azure Key Vault virtual machine extension for
     Linux <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux>`_ or the
     `Azure Key Vault virtual machine extension for Windows
     <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows>`_.
    :vartype certificate_url: str
    :ivar certificate_store: For Windows VMs, specifies the certificate store on the Virtual
     Machine to which the certificate should be added. The specified certificate store is implicitly
     in the LocalMachine account. :code:`<br>`:code:`<br>`For Linux VMs, the certificate file is
     placed under the /var/lib/waagent directory, with the file name &lt;UppercaseThumbprint&gt;.crt
     for the X509 certificate file and &lt;UppercaseThumbprint&gt;.prv for private key. Both of
     these files are .pem formatted.
    :vartype certificate_store: str
    """

    _attribute_map = {
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
        'certificate_store': {'key': 'certificateStore', 'type': 'str'},
    }

    def __init__(self, *, certificate_url: Optional[str] = None, certificate_store: Optional[str] = None, **kwargs):
        """
        :keyword certificate_url: URL of a certificate uploaded to Key Vault as a secret. The secret
         is the Base64 encoding, in UTF-8, of a JSON object of the form
         {"data":"<Base64-encoded-certificate>", "dataType":"pfx", "password":"<pfx-file-password>"}.
         To install certificates on a virtual machine it is recommended to use the Azure Key Vault
         virtual machine extension for Linux
         (https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or for
         Windows (https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
        :paramtype certificate_url: str
        :keyword certificate_store: For Windows VMs, the certificate store (implicitly in the
         LocalMachine account) the certificate is added to. For Linux VMs, the certificate file is
         placed under /var/lib/waagent as <UppercaseThumbprint>.crt (X509 certificate) and
         <UppercaseThumbprint>.prv (private key), both .pem formatted.
        :paramtype certificate_store: str
        """
        super().__init__(**kwargs)
        self.certificate_url = certificate_url
        self.certificate_store = certificate_store
class VaultSecretGroup(msrest.serialization.Model):
    """Describes a set of certificates which are all in the same Key Vault.

    :ivar source_vault: The relative URL of the Key Vault containing all of the certificates in
     VaultCertificates.
    :vartype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar vault_certificates: The list of key vault references in SourceVault which contain
     certificates.
    :vartype vault_certificates: list[~azure.mgmt.compute.v2022_03_01.models.VaultCertificate]
    """

    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
        'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
    }

    def __init__(
        self, *,
        source_vault: Optional["SubResource"] = None,
        vault_certificates: Optional[List["VaultCertificate"]] = None,
        **kwargs
    ):
        """
        :keyword source_vault: The relative URL of the Key Vault containing all of the certificates
         in VaultCertificates.
        :paramtype source_vault: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword vault_certificates: The list of key vault references in SourceVault which contain
         certificates.
        :paramtype vault_certificates: list[~azure.mgmt.compute.v2022_03_01.models.VaultCertificate]
        """
        super().__init__(**kwargs)
        self.source_vault = source_vault
        self.vault_certificates = vault_certificates
class VirtualHardDisk(msrest.serialization.Model):
    """Describes the uri of a disk.

    :ivar uri: Specifies the virtual hard disk's uri.
    :vartype uri: str
    """

    _attribute_map = {
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, *, uri: Optional[str] = None, **kwargs):
        """
        :keyword uri: Specifies the virtual hard disk's uri.
        :paramtype uri: str
        """
        super().__init__(**kwargs)
        self.uri = uri
class VirtualMachine(Resource):
"""Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Required. Resource location.
:vartype location: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar plan: Specifies information about the marketplace image used to create the virtual
machine. This element is only used for marketplace images. Before you can use a marketplace
image from an API, you must enable the image for programmatic use. In the Azure portal, find
the marketplace image that you want to use and then click **Want to deploy programmatically,
Get Started ->**. Enter any required information and then click **Save**.
:vartype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
:ivar resources: The virtual machine child extension resources.
:vartype resources: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtension]
:ivar identity: The identity of the virtual machine, if configured.
:vartype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
:ivar zones: The virtual machine zones.
:vartype zones: list[str]
:ivar extended_location: The extended location of the Virtual Machine.
:vartype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
:ivar hardware_profile: Specifies the hardware settings for the virtual machine.
:vartype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
:ivar storage_profile: Specifies the storage settings for the virtual machine disks.
:vartype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
:ivar additional_capabilities: Specifies additional capabilities enabled or disabled on the
virtual machine.
:vartype additional_capabilities: ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
:ivar os_profile: Specifies the operating system settings used while creating the virtual
machine. Some of the settings cannot be changed once VM is provisioned.
:vartype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
:ivar network_profile: Specifies the network interfaces of the virtual machine.
:vartype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
:ivar security_profile: Specifies the Security related profile settings for the virtual
machine.
:vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
:ivar diagnostics_profile: Specifies the boot diagnostic settings state.
:code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
:vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
:ivar availability_set: Specifies information about the availability set that the virtual
machine should be assigned to. Virtual machines specified in the same availability set are
allocated to different nodes to maximize availability. For more information about availability
sets, see `Availability sets overview
<https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_.
:code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Maintenance
and updates for Virtual Machines in Azure
<https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_
:code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation
time. The availability set to which the VM is being added should be under the same resource
group as the availability set resource. An existing VM cannot be added to an availability set.
:code:`<br>`:code:`<br>`This property cannot exist along with a non-null
properties.virtualMachineScaleSet reference.
:vartype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar virtual_machine_scale_set: Specifies information about the virtual machine scale set that
the virtual machine should be assigned to. Virtual machines specified in the same virtual
machine scale set are allocated to different nodes to maximize availability. Currently, a VM
can only be added to virtual machine scale set at creation time. An existing VM cannot be added
to a virtual machine scale set. :code:`<br>`:code:`<br>`This property cannot exist along with a
non-null properties.availabilitySet reference. :code:`<br>`:code:`<br>`Minimum api‐version:
2019‐03‐01.
:vartype virtual_machine_scale_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar proximity_placement_group: Specifies information about the proximity placement group that
the virtual machine should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
2018-04-01.
:vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar priority: Specifies the priority for the virtual machine. :code:`<br>`:code:`<br>`Minimum
api-version: 2019-03-01. Possible values include: "Regular", "Low", "Spot".
:vartype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
:ivar eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
:code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
"Delete".
:vartype eviction_policy: str or
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
:ivar billing_profile: Specifies the billing related details of a Azure Spot virtual machine.
:code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
:vartype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
:ivar host: Specifies information about the dedicated host that the virtual machine resides in.
:code:`<br>`:code:`<br>`Minimum api-version: 2018-10-01.
:vartype host: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar host_group: Specifies information about the dedicated host group that the virtual machine
resides in. :code:`<br>`:code:`<br>`Minimum api-version: 2020-06-01.
:code:`<br>`:code:`<br>`NOTE: User cannot specify both host and hostGroup properties.
:vartype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineInstanceView
:ivar license_type: Specifies that the image or disk that is being used was licensed
on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
:code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
:code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
:code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
:code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
<https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
:code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
<https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
:code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
:vartype license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier that is encoded and
stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands.
:vartype vm_id: str
:ivar extensions_time_budget: Specifies the time alloted for all extensions to start. The time
duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in
ISO 8601 format. The default value is 90 minutes (PT1H30M). :code:`<br>`:code:`<br>` Minimum
api-version: 2020-06-01.
:vartype extensions_time_budget: str
:ivar platform_fault_domain: Specifies the scale set logical fault domain into which the
Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned
to a fault domain that best maintains balance across available fault
domains.:code:`<br>`:code:`<li>`This is applicable only if the 'virtualMachineScaleSet'
property of this Virtual Machine is set.:code:`<li>`The Virtual Machine Scale Set that is
referenced, must have 'platformFaultDomainCount' > 1.:code:`<li>`This property cannot be
updated once the Virtual Machine is created.:code:`<li>`Fault domain assignment can be viewed
in the Virtual Machine Instance View.:code:`<br>`:code:`<br>`Minimum api‐version: 2020‐12‐01.
:vartype platform_fault_domain: int
:ivar scheduled_events_profile: Specifies Scheduled Event related configurations.
:vartype scheduled_events_profile:
~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
:ivar user_data: UserData for the VM, which must be base-64 encoded. Customer should not pass
any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
:vartype user_data: str
:ivar capacity_reservation: Specifies information about the capacity reservation that is used
to allocate virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
:vartype capacity_reservation:
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
:ivar application_profile: Specifies the gallery applications that should be made available to
the VM/VMSS.
:vartype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
:ivar time_created: Specifies the time at which the Virtual Machine resource was
created.:code:`<br>`:code:`<br>`Minimum api-version: 2022-03-01.
:vartype time_created: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'resources': {'readonly': True},
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
'time_created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
'priority': {'key': 'properties.priority', 'type': 'str'},
'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
'host': {'key': 'properties.host', 'type': 'SubResource'},
'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'extensions_time_budget': {'key': 'properties.extensionsTimeBudget', 'type': 'str'},
'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
'scheduled_events_profile': {'key': 'properties.scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
'user_data': {'key': 'properties.userData', 'type': 'str'},
'capacity_reservation': {'key': 'properties.capacityReservation', 'type': 'CapacityReservationProfile'},
'application_profile': {'key': 'properties.applicationProfile', 'type': 'ApplicationProfile'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
}
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        host_group: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        extensions_time_budget: Optional[str] = None,
        platform_fault_domain: Optional[int] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        capacity_reservation: Optional["CapacityReservationProfile"] = None,
        application_profile: Optional["ApplicationProfile"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword plan: Specifies information about the marketplace image used to create the virtual
         machine. This element is only used for marketplace images. Before you can use a marketplace
         image from an API, you must enable the image for programmatic use. In the Azure portal, find
         the marketplace image that you want to use and then click **Want to deploy programmatically,
         Get Started ->**. Enter any required information and then click **Save**.
        :paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
        :keyword identity: The identity of the virtual machine, if configured.
        :paramtype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
        :keyword zones: The virtual machine zones.
        :paramtype zones: list[str]
        :keyword extended_location: The extended location of the Virtual Machine.
        :paramtype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
        :keyword hardware_profile: Specifies the hardware settings for the virtual machine.
        :paramtype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
        :keyword storage_profile: Specifies the storage settings for the virtual machine disks.
        :paramtype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
        :keyword additional_capabilities: Specifies additional capabilities enabled or disabled on the
         virtual machine.
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
        :keyword os_profile: Specifies the operating system settings used while creating the virtual
         machine. Some of the settings cannot be changed once VM is provisioned.
        :paramtype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
        :keyword network_profile: Specifies the network interfaces of the virtual machine.
        :paramtype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
        :keyword security_profile: Specifies the Security related profile settings for the virtual
         machine.
        :paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
        :keyword diagnostics_profile: Specifies the boot diagnostic settings state.
         :code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
        :paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
        :keyword availability_set: Specifies information about the availability set that the virtual
         machine should be assigned to. Virtual machines specified in the same availability set are
         allocated to different nodes to maximize availability. For more information about availability
         sets, see `Availability sets overview
         <https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_.
         :code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Maintenance
         and updates for Virtual Machines in Azure
         <https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_
         :code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation
         time. The availability set to which the VM is being added should be under the same resource
         group as the availability set resource. An existing VM cannot be added to an availability set.
         :code:`<br>`:code:`<br>`This property cannot exist along with a non-null
         properties.virtualMachineScaleSet reference.
        :paramtype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword virtual_machine_scale_set: Specifies information about the virtual machine scale set
         that the virtual machine should be assigned to. Virtual machines specified in the same virtual
         machine scale set are allocated to different nodes to maximize availability. Currently, a VM
         can only be added to virtual machine scale set at creation time. An existing VM cannot be added
         to a virtual machine scale set. :code:`<br>`:code:`<br>`This property cannot exist along with a
         non-null properties.availabilitySet reference. :code:`<br>`:code:`<br>`Minimum api-version:
         2019-03-01.
        :paramtype virtual_machine_scale_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword proximity_placement_group: Specifies information about the proximity placement group
         that the virtual machine should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
         2018-04-01.
        :paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword priority: Specifies the priority for the virtual machine.
         :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01. Possible values include: "Regular",
         "Low", "Spot".
        :paramtype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
        :keyword eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
         Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
         'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
         :code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
         and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
         "Delete".
        :paramtype eviction_policy: str or
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
        :keyword billing_profile: Specifies the billing related details of a Azure Spot virtual
         machine. :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
        :paramtype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
        :keyword host: Specifies information about the dedicated host that the virtual machine resides
         in. :code:`<br>`:code:`<br>`Minimum api-version: 2018-10-01.
        :paramtype host: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword host_group: Specifies information about the dedicated host group that the virtual
         machine resides in. :code:`<br>`:code:`<br>`Minimum api-version: 2020-06-01.
         :code:`<br>`:code:`<br>`NOTE: User cannot specify both host and hostGroup properties.
        :paramtype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword license_type: Specifies that the image or disk that is being used was licensed
         on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
         :code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
         :code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
         :code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
         :code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
         <https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
         :code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
         <https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
         :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
        :paramtype license_type: str
        :keyword extensions_time_budget: Specifies the time alloted for all extensions to start. The
         time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified
         in ISO 8601 format. The default value is 90 minutes (PT1H30M). :code:`<br>`:code:`<br>` Minimum
         api-version: 2020-06-01.
        :paramtype extensions_time_budget: str
        :keyword platform_fault_domain: Specifies the scale set logical fault domain into which the
         Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned
         to a fault domain that best maintains balance across available fault
         domains.:code:`<br>`:code:`<li>`This is applicable only if the 'virtualMachineScaleSet'
         property of this Virtual Machine is set.:code:`<li>`The Virtual Machine Scale Set that is
         referenced, must have 'platformFaultDomainCount' > 1.:code:`<li>`This property cannot be
         updated once the Virtual Machine is created.:code:`<li>`Fault domain assignment can be viewed
         in the Virtual Machine Instance View.:code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
        :paramtype platform_fault_domain: int
        :keyword scheduled_events_profile: Specifies Scheduled Event related configurations.
        :paramtype scheduled_events_profile:
         ~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
        :keyword user_data: UserData for the VM, which must be base-64 encoded. Customer should not
         pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
        :paramtype user_data: str
        :keyword capacity_reservation: Specifies information about the capacity reservation that is
         used to allocate virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
        :paramtype capacity_reservation:
         ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
        :keyword application_profile: Specifies the gallery applications that should be made available
         to the VM/VMSS.
        :paramtype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
        """
        super(VirtualMachine, self).__init__(location=location, tags=tags, **kwargs)
        self.plan = plan
        # Read-only: child extension resources; populated by the server only.
        self.resources = None
        self.identity = identity
        self.zones = zones
        self.extended_location = extended_location
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.host_group = host_group
        # Read-only: provisioning state as reported in the response.
        self.provisioning_state = None
        # Read-only: runtime instance view; populated by the server only.
        self.instance_view = None
        self.license_type = license_type
        # Read-only: unique VM id (128-bit identifier); populated by the server only.
        self.vm_id = None
        self.extensions_time_budget = extensions_time_budget
        self.platform_fault_domain = platform_fault_domain
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
        self.capacity_reservation = capacity_reservation
        self.application_profile = application_profile
        # Read-only: resource creation timestamp; populated by the server only.
        self.time_created = None
class VirtualMachineAgentInstanceView(msrest.serialization.Model):
    """Instance view of the VM Agent that runs on a virtual machine.

    :ivar vm_agent_version: The VM Agent full version.
    :vartype vm_agent_version: str
    :ivar extension_handlers: The virtual machine extension handler instance view.
    :vartype extension_handlers:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionHandlerInstanceView]
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'vm_agent_version': {'key': 'vmAgentVersion', 'type': 'str'},
        'extension_handlers': {'key': 'extensionHandlers', 'type': '[VirtualMachineExtensionHandlerInstanceView]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        vm_agent_version: Optional[str] = None,
        extension_handlers: Optional[List["VirtualMachineExtensionHandlerInstanceView"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword vm_agent_version: The VM Agent full version.
        :paramtype vm_agent_version: str
        :keyword extension_handlers: The virtual machine extension handler instance view.
        :paramtype extension_handlers:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionHandlerInstanceView]
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super(VirtualMachineAgentInstanceView, self).__init__(**kwargs)
        # Plain pass-through of caller-supplied values.
        self.statuses = statuses
        self.extension_handlers = extension_handlers
        self.vm_agent_version = vm_agent_version
class VirtualMachineAssessPatchesResult(msrest.serialization.Model):
    """Result of an AssessPatches operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: Overall success or failure status of the operation. Remains "InProgress"
     until the operation completes, then becomes "Unknown", "Failed", "Succeeded", or
     "CompletedWithWarnings.". Possible values include: "Unknown", "InProgress", "Failed",
     "Succeeded", "CompletedWithWarnings".
    :vartype status: str or ~azure.mgmt.compute.v2022_03_01.models.PatchOperationStatus
    :ivar assessment_activity_id: Activity ID of the operation that produced this result;
     used to correlate across CRP and extension logs.
    :vartype assessment_activity_id: str
    :ivar reboot_pending: Overall reboot status of the VM. True when partially installed
     patches require a reboot to complete installation but the reboot has not yet occurred.
    :vartype reboot_pending: bool
    :ivar critical_and_security_patch_count: Number of critical or security patches detected
     as available and not yet installed.
    :vartype critical_and_security_patch_count: int
    :ivar other_patch_count: Number of all available patches excluding critical and security.
    :vartype other_patch_count: int
    :ivar start_date_time: UTC timestamp when the operation began.
    :vartype start_date_time: ~datetime.datetime
    :ivar available_patches: Patches detected as available for installation.
    :vartype available_patches:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSoftwarePatchProperties]
    :ivar error: Errors encountered while executing the operation; the details array lists
     each of them.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    _validation = {
        'status': {'readonly': True},
        'assessment_activity_id': {'readonly': True},
        'reboot_pending': {'readonly': True},
        'critical_and_security_patch_count': {'readonly': True},
        'other_patch_count': {'readonly': True},
        'start_date_time': {'readonly': True},
        'available_patches': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'},
        'reboot_pending': {'key': 'rebootPending', 'type': 'bool'},
        'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'},
        'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'},
        'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
        'available_patches': {'key': 'availablePatches', 'type': '[VirtualMachineSoftwarePatchProperties]'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(VirtualMachineAssessPatchesResult, self).__init__(**kwargs)
        # Every attribute is read-only and filled in by the service.
        for readonly_name in (
            'status',
            'assessment_activity_id',
            'reboot_pending',
            'critical_and_security_patch_count',
            'other_patch_count',
            'start_date_time',
            'available_patches',
            'error',
        ):
            setattr(self, readonly_name, None)
class VirtualMachineCaptureParameters(msrest.serialization.Model):
    """Parameters for the Capture Virtual Machine operation.

    All required parameters must be populated in order to send to Azure.

    :ivar vhd_prefix: Required. The captured virtual hard disk's name prefix.
    :vartype vhd_prefix: str
    :ivar destination_container_name: Required. The destination container name.
    :vartype destination_container_name: str
    :ivar overwrite_vhds: Required. Whether to overwrite the destination virtual hard disk
     in case of conflict.
    :vartype overwrite_vhds: bool
    """

    _validation = {
        'vhd_prefix': {'required': True},
        'destination_container_name': {'required': True},
        'overwrite_vhds': {'required': True},
    }

    _attribute_map = {
        'vhd_prefix': {'key': 'vhdPrefix', 'type': 'str'},
        'destination_container_name': {'key': 'destinationContainerName', 'type': 'str'},
        'overwrite_vhds': {'key': 'overwriteVhds', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        vhd_prefix: str,
        destination_container_name: str,
        overwrite_vhds: bool,
        **kwargs
    ):
        """
        :keyword vhd_prefix: Required. The captured virtual hard disk's name prefix.
        :paramtype vhd_prefix: str
        :keyword destination_container_name: Required. The destination container name.
        :paramtype destination_container_name: str
        :keyword overwrite_vhds: Required. Whether to overwrite the destination virtual hard
         disk in case of conflict.
        :paramtype overwrite_vhds: bool
        """
        super(VirtualMachineCaptureParameters, self).__init__(**kwargs)
        # All three are mandatory; validation is enforced via _validation.
        self.overwrite_vhds = overwrite_vhds
        self.destination_container_name = destination_container_name
        self.vhd_prefix = vhd_prefix
class VirtualMachineCaptureResult(SubResource):
    """Output of the virtual machine capture operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar schema: the schema of the captured virtual machine.
    :vartype schema: str
    :ivar content_version: the version of the content.
    :vartype content_version: str
    :ivar parameters: parameters of the captured virtual machine.
    :vartype parameters: any
    :ivar resources: a list of resource items of the captured virtual machine.
    :vartype resources: list[any]
    """

    _validation = {
        'schema': {'readonly': True},
        'content_version': {'readonly': True},
        'parameters': {'readonly': True},
        'resources': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema': {'key': '$schema', 'type': 'str'},
        'content_version': {'key': 'contentVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '[object]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        """
        super(VirtualMachineCaptureResult, self).__init__(id=id, **kwargs)
        # Template fields below are server-populated after capture completes.
        for server_field in ('schema', 'content_version', 'parameters', 'resources'):
            setattr(self, server_field, None)
class VirtualMachineExtension(ResourceWithOptionalLocation):
    """Describes a Virtual Machine Extension.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar location: Resource location.
    :vartype location: str
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: The name of the extension handler publisher.
    :vartype publisher: str
    :ivar type_properties_type: The type of the extension; an example is
     "CustomScriptExtension".
    :vartype type_properties_type: str
    :ivar type_handler_version: The version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Whether the extension should use a newer minor version
     if one is available at deployment time. Once deployed, however, the extension will not
     upgrade minor versions unless redeployed, even with this property set to true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Whether the extension should be automatically upgraded by
     the platform if there is a newer version of the extension available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can contain either protectedSettings or
     protectedSettingsFromKeyVault or no protected settings at all.
    :vartype protected_settings: any
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The virtual machine extension instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView
    :ivar suppress_failures: Whether failures stemming from the extension will be suppressed
     (operational failures such as not connecting to the VM will not be suppressed regardless
     of this value). The default is false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: The extensions protected settings that are
     passed by reference, and consumed from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword force_update_tag: How the extension handler should be forced to update even
         if the extension configuration has not changed.
        :paramtype force_update_tag: str
        :keyword publisher: The name of the extension handler publisher.
        :paramtype publisher: str
        :keyword type_properties_type: The type of the extension; an example is
         "CustomScriptExtension".
        :paramtype type_properties_type: str
        :keyword type_handler_version: The version of the script handler.
        :paramtype type_handler_version: str
        :keyword auto_upgrade_minor_version: Whether the extension should use a newer minor
         version if one is available at deployment time. Once deployed, however, the
         extension will not upgrade minor versions unless redeployed, even with this property
         set to true.
        :paramtype auto_upgrade_minor_version: bool
        :keyword enable_automatic_upgrade: Whether the extension should be automatically
         upgraded by the platform if there is a newer version of the extension available.
        :paramtype enable_automatic_upgrade: bool
        :keyword settings: Json formatted public settings for the extension.
        :paramtype settings: any
        :keyword protected_settings: The extension can contain either protectedSettings or
         protectedSettingsFromKeyVault or no protected settings at all.
        :paramtype protected_settings: any
        :keyword instance_view: The virtual machine extension instance view.
        :paramtype instance_view:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView
        :keyword suppress_failures: Whether failures stemming from the extension will be
         suppressed (operational failures such as not connecting to the VM will not be
         suppressed regardless of this value). The default is false.
        :paramtype suppress_failures: bool
        :keyword protected_settings_from_key_vault: The extensions protected settings that
         are passed by reference, and consumed from key vault.
        :paramtype protected_settings_from_key_vault: any
        """
        super(VirtualMachineExtension, self).__init__(location=location, tags=tags, **kwargs)
        # Handler identity and upgrade policy.
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.force_update_tag = force_update_tag
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        # Public and protected extension configuration.
        self.settings = settings
        self.protected_settings = protected_settings
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
        # Runtime reporting.
        self.instance_view = instance_view
        self.suppress_failures = suppress_failures
        # Read-only: populated by the server in responses.
        self.provisioning_state = None
class VirtualMachineExtensionHandlerInstanceView(msrest.serialization.Model):
    """Instance view of a single virtual machine extension handler.

    :ivar type: The type of the extension; an example is "CustomScriptExtension".
    :vartype type: str
    :ivar type_handler_version: The version of the script handler.
    :vartype type_handler_version: str
    :ivar status: The extension handler status.
    :vartype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(
        self,
        *,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        status: Optional["InstanceViewStatus"] = None,
        **kwargs
    ):
        """
        :keyword type: The type of the extension; an example is "CustomScriptExtension".
        :paramtype type: str
        :keyword type_handler_version: The version of the script handler.
        :paramtype type_handler_version: str
        :keyword status: The extension handler status.
        :paramtype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
        """
        super(VirtualMachineExtensionHandlerInstanceView, self).__init__(**kwargs)
        # NOTE: 'type' shadows the builtin; the name is fixed by the wire contract.
        self.status = status
        self.type_handler_version = type_handler_version
        self.type = type
class VirtualMachineExtensionImage(Resource):
    """Describes a Virtual Machine Extension Image.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar operating_system: The operating system this extension supports.
    :vartype operating_system: str
    :ivar compute_role: The type of role (IaaS or PaaS) this extension supports.
    :vartype compute_role: str
    :ivar handler_schema: The schema defined by publisher, where extension consumers should
     provide settings in a matching schema.
    :vartype handler_schema: str
    :ivar vm_scale_set_enabled: Whether the extension can be used on xRP VMScaleSets. By
     default existing extensions are usable on scalesets, but there might be cases where a
     publisher wants to explicitly indicate the extension is only enabled for CRP VMs but not
     VMSS.
    :vartype vm_scale_set_enabled: bool
    :ivar supports_multiple_extensions: Whether the handler can support multiple extensions.
    :vartype supports_multiple_extensions: bool
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'operating_system': {'key': 'properties.operatingSystem', 'type': 'str'},
        'compute_role': {'key': 'properties.computeRole', 'type': 'str'},
        'handler_schema': {'key': 'properties.handlerSchema', 'type': 'str'},
        'vm_scale_set_enabled': {'key': 'properties.vmScaleSetEnabled', 'type': 'bool'},
        'supports_multiple_extensions': {'key': 'properties.supportsMultipleExtensions', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        operating_system: Optional[str] = None,
        compute_role: Optional[str] = None,
        handler_schema: Optional[str] = None,
        vm_scale_set_enabled: Optional[bool] = None,
        supports_multiple_extensions: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword operating_system: The operating system this extension supports.
        :paramtype operating_system: str
        :keyword compute_role: The type of role (IaaS or PaaS) this extension supports.
        :paramtype compute_role: str
        :keyword handler_schema: The schema defined by publisher, where extension consumers
         should provide settings in a matching schema.
        :paramtype handler_schema: str
        :keyword vm_scale_set_enabled: Whether the extension can be used on xRP VMScaleSets.
         By default existing extensions are usable on scalesets, but there might be cases
         where a publisher wants to explicitly indicate the extension is only enabled for CRP
         VMs but not VMSS.
        :paramtype vm_scale_set_enabled: bool
        :keyword supports_multiple_extensions: Whether the handler can support multiple
         extensions.
        :paramtype supports_multiple_extensions: bool
        """
        super(VirtualMachineExtensionImage, self).__init__(location=location, tags=tags, **kwargs)
        # Property-bag values, serialized under 'properties.*' per _attribute_map.
        self.supports_multiple_extensions = supports_multiple_extensions
        self.vm_scale_set_enabled = vm_scale_set_enabled
        self.handler_schema = handler_schema
        self.compute_role = compute_role
        self.operating_system = operating_system
class VirtualMachineExtensionInstanceView(msrest.serialization.Model):
    """Instance view of a virtual machine extension.

    :ivar name: The virtual machine extension name.
    :vartype name: str
    :ivar type: The type of the extension; an example is "CustomScriptExtension".
    :vartype type: str
    :ivar type_handler_version: The version of the script handler.
    :vartype type_handler_version: str
    :ivar substatuses: The resource status information.
    :vartype substatuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'substatuses': {'key': 'substatuses', 'type': '[InstanceViewStatus]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        substatuses: Optional[List["InstanceViewStatus"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword name: The virtual machine extension name.
        :paramtype name: str
        :keyword type: The type of the extension; an example is "CustomScriptExtension".
        :paramtype type: str
        :keyword type_handler_version: The version of the script handler.
        :paramtype type_handler_version: str
        :keyword substatuses: The resource status information.
        :paramtype substatuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super(VirtualMachineExtensionInstanceView, self).__init__(**kwargs)
        # NOTE: 'type' shadows the builtin; the name is fixed by the wire contract.
        self.statuses = statuses
        self.substatuses = substatuses
        self.type_handler_version = type_handler_version
        self.type = type
        self.name = name
class VirtualMachineExtensionsListResult(msrest.serialization.Model):
    """Response payload for the List Extension operation.

    :ivar value: The list of extensions.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtension]
    """

    # Single wire-format field: an array of VirtualMachineExtension models.
    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineExtension]"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["VirtualMachineExtension"]] = None,
        **kwargs
    ):
        """
        :keyword value: The list of extensions.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtension]
        """
        super(VirtualMachineExtensionsListResult, self).__init__(**kwargs)
        self.value = value
class VirtualMachineExtensionUpdate(UpdateResource):
    """Update payload describing a Virtual Machine Extension.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar force_update_tag: Forces the extension handler to update even when the extension
     configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: Name of the extension handler publisher.
    :vartype publisher: str
    :ivar type: The extension type; an example is "CustomScriptExtension".
    :vartype type: str
    :ivar type_handler_version: Version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Whether the extension should pick up a newer minor version
     available at deployment time. After deployment the extension will not upgrade minor versions
     unless redeployed, even when this property is true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Whether the platform should automatically upgrade the
     extension when a newer version becomes available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can carry either protectedSettings or
     protectedSettingsFromKeyVault, or no protected settings at all.
    :vartype protected_settings: any
    :ivar suppress_failures: Whether failures stemming from the extension are suppressed
     (operational failures such as not connecting to the VM are never suppressed regardless of
     this value). Defaults to false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: Protected settings passed by reference and consumed
     from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    # Wire-format mapping; extension fields live under the "properties" envelope.
    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.publisher", "type": "str"},
        "type": {"key": "properties.type", "type": "str"},
        "type_handler_version": {"key": "properties.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"},
        "enable_automatic_upgrade": {"key": "properties.enableAutomaticUpgrade", "type": "bool"},
        "settings": {"key": "properties.settings", "type": "object"},
        "protected_settings": {"key": "properties.protectedSettings", "type": "object"},
        "suppress_failures": {"key": "properties.suppressFailures", "type": "bool"},
        "protected_settings_from_key_vault": {"key": "properties.protectedSettingsFromKeyVault", "type": "object"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword force_update_tag: Forces the extension handler to update even when the extension
         configuration has not changed.
        :paramtype force_update_tag: str
        :keyword publisher: Name of the extension handler publisher.
        :paramtype publisher: str
        :keyword type: The extension type; an example is "CustomScriptExtension".
        :paramtype type: str
        :keyword type_handler_version: Version of the script handler.
        :paramtype type_handler_version: str
        :keyword auto_upgrade_minor_version: Whether the extension should pick up a newer minor
         version available at deployment time. After deployment the extension will not upgrade
         minor versions unless redeployed, even when this property is true.
        :paramtype auto_upgrade_minor_version: bool
        :keyword enable_automatic_upgrade: Whether the platform should automatically upgrade the
         extension when a newer version becomes available.
        :paramtype enable_automatic_upgrade: bool
        :keyword settings: Json formatted public settings for the extension.
        :paramtype settings: any
        :keyword protected_settings: The extension can carry either protectedSettings or
         protectedSettingsFromKeyVault, or no protected settings at all.
        :paramtype protected_settings: any
        :keyword suppress_failures: Whether failures stemming from the extension are suppressed
         (operational failures such as not connecting to the VM are never suppressed regardless of
         this value). Defaults to false.
        :paramtype suppress_failures: bool
        :keyword protected_settings_from_key_vault: Protected settings passed by reference and
         consumed from key vault.
        :paramtype protected_settings_from_key_vault: any
        """
        # Tags are handled by the UpdateResource base; everything else is stored here.
        super(VirtualMachineExtensionUpdate, self).__init__(tags=tags, **kwargs)
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
        self.suppress_failures = suppress_failures
        self.protected_settings = protected_settings
        self.settings = settings
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.type_handler_version = type_handler_version
        self.type = type
        self.publisher = publisher
        self.force_update_tag = force_update_tag
class VirtualMachineHealthStatus(msrest.serialization.Model):
    """Health status of a VM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: Health status information for the VM.
    :vartype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    """

    # "status" is server-populated and therefore read-only.
    _validation = {
        "status": {"readonly": True},
    }

    _attribute_map = {
        "status": {"key": "status", "type": "InstanceViewStatus"},
    }

    def __init__(
        self,
        **kwargs
    ):
        """No user-settable fields; ``status`` is populated by the service."""
        super(VirtualMachineHealthStatus, self).__init__(**kwargs)
        self.status = None
class VirtualMachineIdentity(msrest.serialization.Model):
    """Identity configuration for a virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: Principal id of the virtual machine identity. Only provided for a
     system assigned identity.
    :vartype principal_id: str
    :ivar tenant_id: Tenant id associated with the virtual machine. Only provided for a system
     assigned identity.
    :vartype tenant_id: str
    :ivar type: The identity type used for the virtual machine. 'SystemAssigned, UserAssigned'
     includes both an implicitly created identity and a set of user assigned identities. 'None'
     removes any identities from the virtual machine. Possible values include: "SystemAssigned",
     "UserAssigned", "SystemAssigned, UserAssigned", "None".
    :vartype type: str or ~azure.mgmt.compute.v2022_03_01.models.ResourceIdentityType
    :ivar user_assigned_identities: User identities associated with the Virtual Machine. The
     dictionary keys are ARM resource ids of the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
    :vartype user_assigned_identities: dict[str,
     ~azure.mgmt.compute.v2022_03_01.models.UserAssignedIdentitiesValue]
    """

    # Both ids are assigned by the service and rejected on requests.
    _validation = {
        "principal_id": {"readonly": True},
        "tenant_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "tenant_id": {"key": "tenantId", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentitiesValue}"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "ResourceIdentityType"]] = None,
        user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None,
        **kwargs
    ):
        """
        :keyword type: The identity type used for the virtual machine. 'SystemAssigned,
         UserAssigned' includes both an implicitly created identity and a set of user assigned
         identities. 'None' removes any identities from the virtual machine. Possible values
         include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
        :paramtype type: str or ~azure.mgmt.compute.v2022_03_01.models.ResourceIdentityType
        :keyword user_assigned_identities: User identities associated with the Virtual Machine.
         The dictionary keys are ARM resource ids of the form:
         '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        :paramtype user_assigned_identities: dict[str,
         ~azure.mgmt.compute.v2022_03_01.models.UserAssignedIdentitiesValue]
        """
        super(VirtualMachineIdentity, self).__init__(**kwargs)
        self.user_assigned_identities = user_assigned_identities
        self.type = type
        # Read-only, server-populated fields start out empty.
        self.tenant_id = None
        self.principal_id = None
class VirtualMachineImageResource(SubResource):
    """Virtual machine image resource information.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Required. Name of the resource.
    :vartype name: str
    :ivar location: Required. Supported Azure location of the resource.
    :vartype location: str
    :ivar tags: A set of tags. Tags assigned to the virtual machine. For more information about
     using tags, see `Using tags to organize your Azure resources
     <https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md>`_.
    :vartype tags: dict[str, str]
    :ivar extended_location: Extended location of the Virtual Machine.
    :vartype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
    """

    # name and location must be supplied when sending to Azure.
    _validation = {
        "name": {"required": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"},
    }

    def __init__(
        self,
        *,
        name: str,
        location: str,
        id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword name: Required. Name of the resource.
        :paramtype name: str
        :keyword location: Required. Supported Azure location of the resource.
        :paramtype location: str
        :keyword tags: A set of tags. Tags assigned to the virtual machine. For more information
         about using tags, see `Using tags to organize your Azure resources
         <https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md>`_.
        :paramtype tags: dict[str, str]
        :keyword extended_location: Extended location of the Virtual Machine.
        :paramtype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
        """
        # The SubResource base handles "id"; the remaining fields are stored here.
        super(VirtualMachineImageResource, self).__init__(id=id, **kwargs)
        self.extended_location = extended_location
        self.tags = tags
        self.location = location
        self.name = name
class VirtualMachineImage(VirtualMachineImageResource):
    """Describes a Virtual Machine Image.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Required. Name of the resource.
    :vartype name: str
    :ivar location: Required. Supported Azure location of the resource.
    :vartype location: str
    :ivar tags: A set of tags. Tags assigned to the virtual machine. For more information about
     using tags, see `Using tags to organize your Azure resources
     <https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md>`_.
    :vartype tags: dict[str, str]
    :ivar extended_location: Extended location of the Virtual Machine.
    :vartype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
    :ivar plan: Establishes the purchase context of any 3rd Party artifact through MarketPlace.
    :vartype plan: ~azure.mgmt.compute.v2022_03_01.models.PurchasePlan
    :ivar os_disk_image: OS disk image information.
    :vartype os_disk_image: ~azure.mgmt.compute.v2022_03_01.models.OSDiskImage
    :ivar data_disk_images:
    :vartype data_disk_images: list[~azure.mgmt.compute.v2022_03_01.models.DataDiskImage]
    :ivar automatic_os_upgrade_properties: Automatic OS upgrade properties on the image.
    :vartype automatic_os_upgrade_properties:
     ~azure.mgmt.compute.v2022_03_01.models.AutomaticOSUpgradeProperties
    :ivar hyper_v_generation: The HyperVGeneration Type. Possible values include: "V1", "V2".
    :vartype hyper_v_generation: str or
     ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
    :ivar disallowed: Disallowed configuration for the VirtualMachine created from the image.
    :vartype disallowed: ~azure.mgmt.compute.v2022_03_01.models.DisallowedConfiguration
    :ivar features:
    :vartype features: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineImageFeature]
    :ivar architecture: The Architecture Type. Possible values include: "x64", "Arm64".
    :vartype architecture: str or ~azure.mgmt.compute.v2022_03_01.models.ArchitectureTypes
    """

    # Same required fields as the base resource type.
    _validation = {
        "name": {"required": True},
        "location": {"required": True},
    }

    # Image-specific fields live under the "properties" envelope on the wire.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"},
        "plan": {"key": "properties.plan", "type": "PurchasePlan"},
        "os_disk_image": {"key": "properties.osDiskImage", "type": "OSDiskImage"},
        "data_disk_images": {"key": "properties.dataDiskImages", "type": "[DataDiskImage]"},
        "automatic_os_upgrade_properties": {"key": "properties.automaticOSUpgradeProperties", "type": "AutomaticOSUpgradeProperties"},
        "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"},
        "disallowed": {"key": "properties.disallowed", "type": "DisallowedConfiguration"},
        "features": {"key": "properties.features", "type": "[VirtualMachineImageFeature]"},
        "architecture": {"key": "properties.architecture", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: str,
        location: str,
        id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        plan: Optional["PurchasePlan"] = None,
        os_disk_image: Optional["OSDiskImage"] = None,
        data_disk_images: Optional[List["DataDiskImage"]] = None,
        automatic_os_upgrade_properties: Optional["AutomaticOSUpgradeProperties"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        disallowed: Optional["DisallowedConfiguration"] = None,
        features: Optional[List["VirtualMachineImageFeature"]] = None,
        architecture: Optional[Union[str, "ArchitectureTypes"]] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword name: Required. Name of the resource.
        :paramtype name: str
        :keyword location: Required. Supported Azure location of the resource.
        :paramtype location: str
        :keyword tags: A set of tags. Tags assigned to the virtual machine. For more information
         about using tags, see `Using tags to organize your Azure resources
         <https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md>`_.
        :paramtype tags: dict[str, str]
        :keyword extended_location: Extended location of the Virtual Machine.
        :paramtype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
        :keyword plan: Establishes the purchase context of any 3rd Party artifact through
         MarketPlace.
        :paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.PurchasePlan
        :keyword os_disk_image: OS disk image information.
        :paramtype os_disk_image: ~azure.mgmt.compute.v2022_03_01.models.OSDiskImage
        :keyword data_disk_images:
        :paramtype data_disk_images: list[~azure.mgmt.compute.v2022_03_01.models.DataDiskImage]
        :keyword automatic_os_upgrade_properties: Automatic OS upgrade properties on the image.
        :paramtype automatic_os_upgrade_properties:
         ~azure.mgmt.compute.v2022_03_01.models.AutomaticOSUpgradeProperties
        :keyword hyper_v_generation: The HyperVGeneration Type. Possible values include: "V1",
         "V2".
        :paramtype hyper_v_generation: str or
         ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationTypes
        :keyword disallowed: Disallowed configuration for the VirtualMachine created from the
         image.
        :paramtype disallowed: ~azure.mgmt.compute.v2022_03_01.models.DisallowedConfiguration
        :keyword features:
        :paramtype features: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineImageFeature]
        :keyword architecture: The Architecture Type. Possible values include: "x64", "Arm64".
        :paramtype architecture: str or ~azure.mgmt.compute.v2022_03_01.models.ArchitectureTypes
        """
        # Base resource fields (id/name/location/tags/extended_location) are delegated upward.
        super(VirtualMachineImage, self).__init__(
            id=id,
            name=name,
            location=location,
            tags=tags,
            extended_location=extended_location,
            **kwargs
        )
        self.architecture = architecture
        self.features = features
        self.disallowed = disallowed
        self.hyper_v_generation = hyper_v_generation
        self.automatic_os_upgrade_properties = automatic_os_upgrade_properties
        self.data_disk_images = data_disk_images
        self.os_disk_image = os_disk_image
        self.plan = plan
class VirtualMachineImageFeature(msrest.serialization.Model):
    """A named capability supported by the image, beyond the standard set.

    :ivar name: Name of the feature.
    :vartype name: str
    :ivar value: Corresponding value for the feature.
    :vartype value: str
    """

    # Simple name/value pair on the wire.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "value": {"key": "value", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Name of the feature.
        :paramtype name: str
        :keyword value: Corresponding value for the feature.
        :paramtype value: str
        """
        super(VirtualMachineImageFeature, self).__init__(**kwargs)
        self.value = value
        self.name = name
class VirtualMachineInstallPatchesParameters(msrest.serialization.Model):
    """Input for InstallPatches as received directly by the API.

    All required parameters must be populated in order to send to Azure.

    :ivar maximum_duration: Maximum amount of time the operation will run. Must be an ISO
     8601-compliant duration string such as PT4H (4 hours).
    :vartype maximum_duration: str
    :ivar reboot_setting: Required. Defines when it is acceptable to reboot a VM during a software
     update operation. Possible values include: "IfRequired", "Never", "Always".
    :vartype reboot_setting: str or
     ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchRebootSetting
    :ivar windows_parameters: InstallPatches input for a Windows VM, as received directly by the
     API.
    :vartype windows_parameters: ~azure.mgmt.compute.v2022_03_01.models.WindowsParameters
    :ivar linux_parameters: InstallPatches input for a Linux VM, as received directly by the API.
    :vartype linux_parameters: ~azure.mgmt.compute.v2022_03_01.models.LinuxParameters
    """

    # Only the reboot policy is mandatory.
    _validation = {
        "reboot_setting": {"required": True},
    }

    _attribute_map = {
        "maximum_duration": {"key": "maximumDuration", "type": "str"},
        "reboot_setting": {"key": "rebootSetting", "type": "str"},
        "windows_parameters": {"key": "windowsParameters", "type": "WindowsParameters"},
        "linux_parameters": {"key": "linuxParameters", "type": "LinuxParameters"},
    }

    def __init__(
        self,
        *,
        reboot_setting: Union[str, "VMGuestPatchRebootSetting"],
        maximum_duration: Optional[str] = None,
        windows_parameters: Optional["WindowsParameters"] = None,
        linux_parameters: Optional["LinuxParameters"] = None,
        **kwargs
    ):
        """
        :keyword maximum_duration: Maximum amount of time the operation will run. Must be an ISO
         8601-compliant duration string such as PT4H (4 hours).
        :paramtype maximum_duration: str
        :keyword reboot_setting: Required. Defines when it is acceptable to reboot a VM during a
         software update operation. Possible values include: "IfRequired", "Never", "Always".
        :paramtype reboot_setting: str or
         ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchRebootSetting
        :keyword windows_parameters: InstallPatches input for a Windows VM, as received directly
         by the API.
        :paramtype windows_parameters: ~azure.mgmt.compute.v2022_03_01.models.WindowsParameters
        :keyword linux_parameters: InstallPatches input for a Linux VM, as received directly by
         the API.
        :paramtype linux_parameters: ~azure.mgmt.compute.v2022_03_01.models.LinuxParameters
        """
        super(VirtualMachineInstallPatchesParameters, self).__init__(**kwargs)
        self.linux_parameters = linux_parameters
        self.windows_parameters = windows_parameters
        self.reboot_setting = reboot_setting
        self.maximum_duration = maximum_duration
class VirtualMachineInstallPatchesResult(msrest.serialization.Model):
    """Result summary of a patch installation operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: Overall success or failure status of the operation. Remains "InProgress" until
     the operation completes, at which point it becomes "Failed", "Succeeded", "Unknown" or
     "CompletedWithWarnings.". Possible values include: "Unknown", "InProgress", "Failed",
     "Succeeded", "CompletedWithWarnings".
    :vartype status: str or ~azure.mgmt.compute.v2022_03_01.models.PatchOperationStatus
    :ivar installation_activity_id: Activity ID of the operation that produced this result; used
     to correlate across CRP and extension logs.
    :vartype installation_activity_id: str
    :ivar reboot_status: Reboot state of the VM following completion of the operation. Possible
     values include: "Unknown", "NotNeeded", "Required", "Started", "Failed", "Completed".
    :vartype reboot_status: str or ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchRebootStatus
    :ivar maintenance_window_exceeded: Whether the operation ran out of time before it completed
     all its intended actions.
    :vartype maintenance_window_exceeded: bool
    :ivar excluded_patch_count: Number of patches not installed because the user blocked their
     installation.
    :vartype excluded_patch_count: int
    :ivar not_selected_patch_count: Number of patches detected as available for install that did
     not meet the operation's criteria.
    :vartype not_selected_patch_count: int
    :ivar pending_patch_count: Number of patches that met the installation criteria but could not
     be installed. Typically this happens when maintenanceWindowExceeded == true.
    :vartype pending_patch_count: int
    :ivar installed_patch_count: Number of patches successfully installed.
    :vartype installed_patch_count: int
    :ivar failed_patch_count: Number of patches that could not be installed due to some issue.
     See errors for details.
    :vartype failed_patch_count: int
    :ivar patches: The patches installed during the operation.
    :vartype patches: list[~azure.mgmt.compute.v2022_03_01.models.PatchInstallationDetail]
    :ivar start_date_time: UTC timestamp when the operation began.
    :vartype start_date_time: ~datetime.datetime
    :ivar error: Errors encountered during execution of the operation. The details array contains
     the list of them.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """

    # Every field is server-populated; nothing here is accepted on a request.
    _validation = {
        "status": {"readonly": True},
        "installation_activity_id": {"readonly": True},
        "reboot_status": {"readonly": True},
        "maintenance_window_exceeded": {"readonly": True},
        "excluded_patch_count": {"readonly": True},
        "not_selected_patch_count": {"readonly": True},
        "pending_patch_count": {"readonly": True},
        "installed_patch_count": {"readonly": True},
        "failed_patch_count": {"readonly": True},
        "patches": {"readonly": True},
        "start_date_time": {"readonly": True},
        "error": {"readonly": True},
    }

    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "installation_activity_id": {"key": "installationActivityId", "type": "str"},
        "reboot_status": {"key": "rebootStatus", "type": "str"},
        "maintenance_window_exceeded": {"key": "maintenanceWindowExceeded", "type": "bool"},
        "excluded_patch_count": {"key": "excludedPatchCount", "type": "int"},
        "not_selected_patch_count": {"key": "notSelectedPatchCount", "type": "int"},
        "pending_patch_count": {"key": "pendingPatchCount", "type": "int"},
        "installed_patch_count": {"key": "installedPatchCount", "type": "int"},
        "failed_patch_count": {"key": "failedPatchCount", "type": "int"},
        "patches": {"key": "patches", "type": "[PatchInstallationDetail]"},
        "start_date_time": {"key": "startDateTime", "type": "iso-8601"},
        "error": {"key": "error", "type": "ApiError"},
    }

    def __init__(
        self,
        **kwargs
    ):
        """No user-settable fields; all attributes are populated by the service."""
        super(VirtualMachineInstallPatchesResult, self).__init__(**kwargs)
        # Initialize every read-only attribute to None; the deserializer fills them in.
        for _field in self._attribute_map:
            setattr(self, _field, None)
class VirtualMachineInstanceView(msrest.serialization.Model):
"""The instance view of a virtual machine.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar platform_update_domain: Specifies the update domain of the virtual machine.
:vartype platform_update_domain: int
:ivar platform_fault_domain: Specifies the fault domain of the virtual machine.
:vartype platform_fault_domain: int
:ivar computer_name: The computer name assigned to the virtual machine.
:vartype computer_name: str
:ivar os_name: The Operating System running on the virtual machine.
:vartype os_name: str
:ivar os_version: The version of Operating System running on the virtual machine.
:vartype os_version: str
:ivar hyper_v_generation: Specifies the HyperVGeneration Type associated with a resource.
Possible values include: "V1", "V2".
:vartype hyper_v_generation: str or ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationType
:ivar rdp_thumb_print: The Remote desktop certificate thumbprint.
:vartype rdp_thumb_print: str
:ivar vm_agent: The VM Agent running on the virtual machine.
:vartype vm_agent: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineAgentInstanceView
:ivar maintenance_redeploy_status: The Maintenance Operation status on the virtual machine.
:vartype maintenance_redeploy_status:
~azure.mgmt.compute.v2022_03_01.models.MaintenanceRedeployStatus
:ivar disks: The virtual machine disk information.
:vartype disks: list[~azure.mgmt.compute.v2022_03_01.models.DiskInstanceView]
:ivar extensions: The extensions information.
:vartype extensions:
list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView]
:ivar vm_health: The health status for the VM.
:vartype vm_health: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineHealthStatus
:ivar boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
Console Output and Screenshot to diagnose VM status. :code:`<br>`:code:`<br>` You can easily
view the output of your console log. :code:`<br>`:code:`<br>` Azure also enables you to see a
screenshot of the VM from the hypervisor.
:vartype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnosticsInstanceView
:ivar assigned_host: Resource id of the dedicated host, on which the virtual machine is
allocated through automatic placement, when the virtual machine is associated with a dedicated
host group that has automatic placement enabled. :code:`<br>`:code:`<br>`Minimum api-version:
2020-06-01.
:vartype assigned_host: str
:ivar statuses: The resource status information.
:vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
:ivar patch_status: [Preview Feature] The status of virtual machine patch operations.
:vartype patch_status: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePatchStatus
"""
_validation = {
'vm_health': {'readonly': True},
'assigned_host': {'readonly': True},
}
_attribute_map = {
'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'os_name': {'key': 'osName', 'type': 'str'},
'os_version': {'key': 'osVersion', 'type': 'str'},
'hyper_v_generation': {'key': 'hyperVGeneration', 'type': 'str'},
'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
'assigned_host': {'key': 'assignedHost', 'type': 'str'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
'patch_status': {'key': 'patchStatus', 'type': 'VirtualMachinePatchStatus'},
}
def __init__(
    self,
    *,
    platform_update_domain: Optional[int] = None,
    platform_fault_domain: Optional[int] = None,
    computer_name: Optional[str] = None,
    os_name: Optional[str] = None,
    os_version: Optional[str] = None,
    hyper_v_generation: Optional[Union[str, "HyperVGenerationType"]] = None,
    rdp_thumb_print: Optional[str] = None,
    vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
    maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
    disks: Optional[List["DiskInstanceView"]] = None,
    extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
    boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
    statuses: Optional[List["InstanceViewStatus"]] = None,
    patch_status: Optional["VirtualMachinePatchStatus"] = None,
    **kwargs
):
    """
    :keyword platform_update_domain: Update domain in which the virtual machine runs.
    :paramtype platform_update_domain: int
    :keyword platform_fault_domain: Fault domain in which the virtual machine runs.
    :paramtype platform_fault_domain: int
    :keyword computer_name: Computer name assigned to the virtual machine.
    :paramtype computer_name: str
    :keyword os_name: Name of the operating system running on the virtual machine.
    :paramtype os_name: str
    :keyword os_version: Version of the operating system running on the virtual machine.
    :paramtype os_version: str
    :keyword hyper_v_generation: HyperVGeneration type associated with the resource. Possible
     values include: "V1", "V2".
    :paramtype hyper_v_generation: str or
     ~azure.mgmt.compute.v2022_03_01.models.HyperVGenerationType
    :keyword rdp_thumb_print: Thumbprint of the remote desktop certificate.
    :paramtype rdp_thumb_print: str
    :keyword vm_agent: State of the VM agent running on the virtual machine.
    :paramtype vm_agent: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineAgentInstanceView
    :keyword maintenance_redeploy_status: Status of the maintenance operation on the virtual
     machine.
    :paramtype maintenance_redeploy_status:
     ~azure.mgmt.compute.v2022_03_01.models.MaintenanceRedeployStatus
    :keyword disks: Disk information for the virtual machine.
    :paramtype disks: list[~azure.mgmt.compute.v2022_03_01.models.DiskInstanceView]
    :keyword extensions: Extension information for the virtual machine.
    :paramtype extensions:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView]
    :keyword boot_diagnostics: Boot Diagnostics debugging state, exposing console output and a
     hypervisor screenshot of the VM.
    :paramtype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnosticsInstanceView
    :keyword statuses: Resource status information.
    :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :keyword patch_status: [Preview Feature] Status of patch operations on the virtual machine.
    :paramtype patch_status: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePatchStatus
    """
    super().__init__(**kwargs)
    self.platform_update_domain = platform_update_domain
    self.platform_fault_domain = platform_fault_domain
    self.computer_name = computer_name
    self.os_name = os_name
    self.os_version = os_version
    self.hyper_v_generation = hyper_v_generation
    self.rdp_thumb_print = rdp_thumb_print
    self.vm_agent = vm_agent
    self.maintenance_redeploy_status = maintenance_redeploy_status
    self.disks = disks
    self.extensions = extensions
    self.boot_diagnostics = boot_diagnostics
    self.statuses = statuses
    self.patch_status = patch_status
    # Read-only fields; populated by the service, never sent in a request.
    self.vm_health = None
    self.assigned_host = None
class VirtualMachineIpTag(msrest.serialization.Model):
    """An IP tag attached to a public IP address.

    :ivar ip_tag_type: Kind of IP tag (for example, "FirstPartyUsage").
    :vartype ip_tag_type: str
    :ivar tag: Tag value associated with the public IP (for example, "SQL" or "Storage").
    :vartype tag: str
    """

    _attribute_map = {
        'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs):
        """
        :keyword ip_tag_type: Kind of IP tag (for example, "FirstPartyUsage").
        :paramtype ip_tag_type: str
        :keyword tag: Tag value associated with the public IP (for example, "SQL" or "Storage").
        :paramtype tag: str
        """
        super().__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
class VirtualMachineListResult(msrest.serialization.Model):
    """Response body of the List Virtual Machine operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. Virtual machines in this page of results.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachine]
    :ivar next_link: URI of the next page of VMs; pass it to ListNext() to continue paging.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachine]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachine"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. Virtual machines in this page of results.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachine]
        :keyword next_link: URI of the next page of VMs; pass it to ListNext() to continue paging.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineNetworkInterfaceConfiguration(msrest.serialization.Model):
    """Network interface configuration for a virtual machine.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Name of the network interface configuration.
    :vartype name: str
    :ivar primary: Whether this is the primary network interface when the virtual machine has
     more than one.
    :vartype primary: bool
    :ivar delete_option: What happens to the network interface when the VM is deleted. Possible
     values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    :ivar enable_accelerated_networking: Whether accelerated networking is enabled on the
     interface.
    :vartype enable_accelerated_networking: bool
    :ivar enable_fpga: Whether FPGA networking is enabled on the interface.
    :vartype enable_fpga: bool
    :ivar enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
    :vartype enable_ip_forwarding: bool
    :ivar network_security_group: Network security group applied to the interface.
    :vartype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar dns_settings: DNS settings applied to the network interfaces.
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceDnsSettingsConfiguration
    :ivar ip_configurations: IP configurations of the network interface.
    :vartype ip_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceIPConfiguration]
    :ivar dscp_configuration:
    :vartype dscp_configuration: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_fpga': {'key': 'properties.enableFpga', 'type': 'bool'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineNetworkInterfaceDnsSettingsConfiguration'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineNetworkInterfaceIPConfiguration]'},
        'dscp_configuration': {'key': 'properties.dscpConfiguration', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        name: str,
        primary: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        enable_ip_forwarding: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineNetworkInterfaceDnsSettingsConfiguration"] = None,
        ip_configurations: Optional[List["VirtualMachineNetworkInterfaceIPConfiguration"]] = None,
        dscp_configuration: Optional["SubResource"] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Name of the network interface configuration.
        :paramtype name: str
        :keyword primary: Whether this is the primary network interface when the virtual machine
         has more than one.
        :paramtype primary: bool
        :keyword delete_option: What happens to the network interface when the VM is deleted.
         Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        :keyword enable_accelerated_networking: Whether accelerated networking is enabled on the
         interface.
        :paramtype enable_accelerated_networking: bool
        :keyword enable_fpga: Whether FPGA networking is enabled on the interface.
        :paramtype enable_fpga: bool
        :keyword enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
        :paramtype enable_ip_forwarding: bool
        :keyword network_security_group: Network security group applied to the interface.
        :paramtype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword dns_settings: DNS settings applied to the network interfaces.
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceDnsSettingsConfiguration
        :keyword ip_configurations: IP configurations of the network interface.
        :paramtype ip_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineNetworkInterfaceIPConfiguration]
        :keyword dscp_configuration:
        :paramtype dscp_configuration: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        super().__init__(**kwargs)
        self.name = name
        self.primary = primary
        self.delete_option = delete_option
        self.enable_accelerated_networking = enable_accelerated_networking
        self.enable_fpga = enable_fpga
        self.enable_ip_forwarding = enable_ip_forwarding
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.dscp_configuration = dscp_configuration
class VirtualMachineNetworkInterfaceDnsSettingsConfiguration(msrest.serialization.Model):
    """DNS settings of a virtual machine's network configuration.

    :ivar dns_servers: IP addresses of the DNS servers.
    :vartype dns_servers: list[str]
    """

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(self, *, dns_servers: Optional[List[str]] = None, **kwargs):
        """
        :keyword dns_servers: IP addresses of the DNS servers.
        :paramtype dns_servers: list[str]
        """
        super().__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineNetworkInterfaceIPConfiguration(msrest.serialization.Model):
    """IP configuration of a virtual machine's network profile.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Name of the IP configuration.
    :vartype name: str
    :ivar subnet: Identifier of the subnet.
    :vartype subnet: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar primary: Whether this is the primary network interface when the virtual machine has
     more than one.
    :vartype primary: bool
    :ivar public_ip_address_configuration: The publicIPAddressConfiguration.
    :vartype public_ip_address_configuration:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePublicIPAddressConfiguration
    :ivar private_ip_address_version: Available from Api-Version 2017-03-30 onwards; whether this
     ipconfiguration is IPv4 or IPv6. Defaults to IPv4. Possible values are: 'IPv4' and 'IPv6'.
     Possible values include: "IPv4", "IPv6".
    :vartype private_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersions
    :ivar application_security_groups: References to application security groups.
    :vartype application_security_groups: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar application_gateway_backend_address_pools: References to backend address pools of
     application gateways. A virtual machine can reference pools of multiple application gateways,
     but multiple virtual machines cannot share the same application gateway.
    :vartype application_gateway_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar load_balancer_backend_address_pools: References to backend address pools of load
     balancers. A virtual machine can reference pools of one public and one internal load
     balancer. [Multiple virtual machines cannot use the same basic sku load balancer].
    :vartype load_balancer_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachinePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        name: str,
        subnet: Optional["SubResource"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachinePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersions"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Name of the IP configuration.
        :paramtype name: str
        :keyword subnet: Identifier of the subnet.
        :paramtype subnet: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword primary: Whether this is the primary network interface when the virtual machine
         has more than one.
        :paramtype primary: bool
        :keyword public_ip_address_configuration: The publicIPAddressConfiguration.
        :paramtype public_ip_address_configuration:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePublicIPAddressConfiguration
        :keyword private_ip_address_version: Available from Api-Version 2017-03-30 onwards;
         whether this ipconfiguration is IPv4 or IPv6. Defaults to IPv4. Possible values are:
         'IPv4' and 'IPv6'. Possible values include: "IPv4", "IPv6".
        :paramtype private_ip_address_version: str or
         ~azure.mgmt.compute.v2022_03_01.models.IPVersions
        :keyword application_security_groups: References to application security groups.
        :paramtype application_security_groups:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword application_gateway_backend_address_pools: References to backend address pools of
         application gateways. A virtual machine can reference pools of multiple application
         gateways, but multiple virtual machines cannot share the same application gateway.
        :paramtype application_gateway_backend_address_pools:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword load_balancer_backend_address_pools: References to backend address pools of load
         balancers. A virtual machine can reference pools of one public and one internal load
         balancer. [Multiple virtual machines cannot use the same basic sku load balancer].
        :paramtype load_balancer_backend_address_pools:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        """
        super().__init__(**kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_security_groups = application_security_groups
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
class VirtualMachinePatchStatus(msrest.serialization.Model):
    """Status of patch operations on a virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar available_patch_summary: Summary of available patches from the latest assessment
     operation on the virtual machine.
    :vartype available_patch_summary: ~azure.mgmt.compute.v2022_03_01.models.AvailablePatchSummary
    :ivar last_patch_installation_summary: Summary of the latest patch installation operation on
     the virtual machine.
    :vartype last_patch_installation_summary:
     ~azure.mgmt.compute.v2022_03_01.models.LastPatchInstallationSummary
    :ivar configuration_statuses: Enablement status of the specified patchMode.
    :vartype configuration_statuses:
     list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    _validation = {
        'configuration_statuses': {'readonly': True},
    }

    _attribute_map = {
        'available_patch_summary': {'key': 'availablePatchSummary', 'type': 'AvailablePatchSummary'},
        'last_patch_installation_summary': {'key': 'lastPatchInstallationSummary', 'type': 'LastPatchInstallationSummary'},
        'configuration_statuses': {'key': 'configurationStatuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        available_patch_summary: Optional["AvailablePatchSummary"] = None,
        last_patch_installation_summary: Optional["LastPatchInstallationSummary"] = None,
        **kwargs
    ):
        """
        :keyword available_patch_summary: Summary of available patches from the latest assessment
         operation on the virtual machine.
        :paramtype available_patch_summary:
         ~azure.mgmt.compute.v2022_03_01.models.AvailablePatchSummary
        :keyword last_patch_installation_summary: Summary of the latest patch installation
         operation on the virtual machine.
        :paramtype last_patch_installation_summary:
         ~azure.mgmt.compute.v2022_03_01.models.LastPatchInstallationSummary
        """
        super().__init__(**kwargs)
        self.available_patch_summary = available_patch_summary
        self.last_patch_installation_summary = last_patch_installation_summary
        # Read-only field; populated by the service, never sent in a request.
        self.configuration_statuses = None
class VirtualMachinePublicIPAddressConfiguration(msrest.serialization.Model):
    """PublicIPAddress configuration of a virtual machine's IP configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Name of the publicIP address configuration.
    :vartype name: str
    :ivar sku: Public IP Sku; can only be set with OrchestrationMode as Flexible.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSku
    :ivar idle_timeout_in_minutes: Idle timeout of the public IP address.
    :vartype idle_timeout_in_minutes: int
    :ivar delete_option: What happens to the public IP address when the VM is deleted. Possible
     values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    :ivar dns_settings: DNS settings applied to the publicIP addresses.
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePublicIPAddressDnsSettingsConfiguration
    :ivar ip_tags: IP tags associated with the public IP address.
    :vartype ip_tags: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIpTag]
    :ivar public_ip_prefix: PublicIPPrefix from which publicIP addresses are allocated.
    :vartype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar public_ip_address_version: Available from Api-Version 2019-07-01 onwards; whether this
     ipconfiguration is IPv4 or IPv6. Defaults to IPv4. Possible values are: 'IPv4' and 'IPv6'.
     Possible values include: "IPv4", "IPv6".
    :vartype public_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersions
    :ivar public_ip_allocation_method: Public IP allocation type. Possible values include:
     "Dynamic", "Static".
    :vartype public_ip_allocation_method: str or
     ~azure.mgmt.compute.v2022_03_01.models.PublicIPAllocationMethod
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachinePublicIPAddressDnsSettingsConfiguration'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'public_ip_allocation_method': {'key': 'properties.publicIPAllocationMethod', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        sku: Optional["PublicIPAddressSku"] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        dns_settings: Optional["VirtualMachinePublicIPAddressDnsSettingsConfiguration"] = None,
        ip_tags: Optional[List["VirtualMachineIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        public_ip_address_version: Optional[Union[str, "IPVersions"]] = None,
        public_ip_allocation_method: Optional[Union[str, "PublicIPAllocationMethod"]] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Name of the publicIP address configuration.
        :paramtype name: str
        :keyword sku: Public IP Sku; can only be set with OrchestrationMode as Flexible.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSku
        :keyword idle_timeout_in_minutes: Idle timeout of the public IP address.
        :paramtype idle_timeout_in_minutes: int
        :keyword delete_option: What happens to the public IP address when the VM is deleted.
         Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        :keyword dns_settings: DNS settings applied to the publicIP addresses.
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePublicIPAddressDnsSettingsConfiguration
        :keyword ip_tags: IP tags associated with the public IP address.
        :paramtype ip_tags: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIpTag]
        :keyword public_ip_prefix: PublicIPPrefix from which publicIP addresses are allocated.
        :paramtype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword public_ip_address_version: Available from Api-Version 2019-07-01 onwards; whether
         this ipconfiguration is IPv4 or IPv6. Defaults to IPv4. Possible values are: 'IPv4' and
         'IPv6'. Possible values include: "IPv4", "IPv6".
        :paramtype public_ip_address_version: str or
         ~azure.mgmt.compute.v2022_03_01.models.IPVersions
        :keyword public_ip_allocation_method: Public IP allocation type. Possible values include:
         "Dynamic", "Static".
        :paramtype public_ip_allocation_method: str or
         ~azure.mgmt.compute.v2022_03_01.models.PublicIPAllocationMethod
        """
        super().__init__(**kwargs)
        self.name = name
        self.sku = sku
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.delete_option = delete_option
        self.dns_settings = dns_settings
        self.ip_tags = ip_tags
        self.public_ip_prefix = public_ip_prefix
        self.public_ip_address_version = public_ip_address_version
        self.public_ip_allocation_method = public_ip_allocation_method
class VirtualMachinePublicIPAddressDnsSettingsConfiguration(msrest.serialization.Model):
    """DNS settings of a virtual machine's network configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar domain_name_label: Required. Domain name label prefix of the PublicIPAddress resources
     to be created. The generated name label is the concatenation of the domain name label and the
     VM network profile unique ID.
    :vartype domain_name_label: str
    """

    _validation = {
        'domain_name_label': {'required': True},
    }

    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }

    def __init__(self, *, domain_name_label: str, **kwargs):
        """
        :keyword domain_name_label: Required. Domain name label prefix of the PublicIPAddress
         resources to be created. The generated name label is the concatenation of the domain name
         label and the VM network profile unique ID.
        :paramtype domain_name_label: str
        """
        super().__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineReimageParameters(msrest.serialization.Model):
    """Parameters for reimaging a Virtual Machine. NOTE: the Virtual Machine OS disk is always reimaged.

    :ivar temp_disk: Whether to reimage the temp disk as well (default: false). Note: this
     parameter is only supported for VM/VMSS with an Ephemeral OS disk.
    :vartype temp_disk: bool
    """

    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        """
        :keyword temp_disk: Whether to reimage the temp disk as well (default: false). Note: this
         parameter is only supported for VM/VMSS with an Ephemeral OS disk.
        :paramtype temp_disk: bool
        """
        super().__init__(**kwargs)
        self.temp_disk = temp_disk
class VirtualMachineRunCommand(Resource):
    """A run command attached to a Virtual Machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar source: Source of the run command script.
    :vartype source: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandScriptSource
    :ivar parameters: Parameters used by the script.
    :vartype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
    :ivar protected_parameters: Parameters used by the script.
    :vartype protected_parameters:
     list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
    :ivar async_execution: Optional. When true, provisioning completes as soon as the script
     starts, without waiting for the script to finish.
    :vartype async_execution: bool
    :ivar run_as_user: User account on the VM under which the run command executes.
    :vartype run_as_user: str
    :ivar run_as_password: Password of the user account on the VM under which the run command
     executes.
    :vartype run_as_password: str
    :ivar timeout_in_seconds: Timeout in seconds for executing the run command.
    :vartype timeout_in_seconds: int
    :ivar output_blob_uri: Azure storage blob that receives the script output stream.
    :vartype output_blob_uri: str
    :ivar error_blob_uri: Azure storage blob that receives the script error stream.
    :vartype error_blob_uri: str
    :ivar provisioning_state: Provisioning state; only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: Instance view of the virtual machine run command.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandInstanceView
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'VirtualMachineRunCommandScriptSource'},
        'parameters': {'key': 'properties.parameters', 'type': '[RunCommandInputParameter]'},
        'protected_parameters': {'key': 'properties.protectedParameters', 'type': '[RunCommandInputParameter]'},
        'async_execution': {'key': 'properties.asyncExecution', 'type': 'bool'},
        'run_as_user': {'key': 'properties.runAsUser', 'type': 'str'},
        'run_as_password': {'key': 'properties.runAsPassword', 'type': 'str'},
        'timeout_in_seconds': {'key': 'properties.timeoutInSeconds', 'type': 'int'},
        'output_blob_uri': {'key': 'properties.outputBlobUri', 'type': 'str'},
        'error_blob_uri': {'key': 'properties.errorBlobUri', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineRunCommandInstanceView'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["VirtualMachineRunCommandScriptSource"] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        protected_parameters: Optional[List["RunCommandInputParameter"]] = None,
        async_execution: Optional[bool] = False,
        run_as_user: Optional[str] = None,
        run_as_password: Optional[str] = None,
        timeout_in_seconds: Optional[int] = None,
        output_blob_uri: Optional[str] = None,
        error_blob_uri: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword source: Source of the run command script.
        :paramtype source: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandScriptSource
        :keyword parameters: Parameters used by the script.
        :paramtype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
        :keyword protected_parameters: Parameters used by the script.
        :paramtype protected_parameters:
         list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
        :keyword async_execution: Optional. When true, provisioning completes as soon as the
         script starts, without waiting for the script to finish.
        :paramtype async_execution: bool
        :keyword run_as_user: User account on the VM under which the run command executes.
        :paramtype run_as_user: str
        :keyword run_as_password: Password of the user account on the VM under which the run
         command executes.
        :paramtype run_as_password: str
        :keyword timeout_in_seconds: Timeout in seconds for executing the run command.
        :paramtype timeout_in_seconds: int
        :keyword output_blob_uri: Azure storage blob that receives the script output stream.
        :paramtype output_blob_uri: str
        :keyword error_blob_uri: Azure storage blob that receives the script error stream.
        :paramtype error_blob_uri: str
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.source = source
        self.parameters = parameters
        self.protected_parameters = protected_parameters
        self.async_execution = async_execution
        self.run_as_user = run_as_user
        self.run_as_password = run_as_password
        self.timeout_in_seconds = timeout_in_seconds
        self.output_blob_uri = output_blob_uri
        self.error_blob_uri = error_blob_uri
        # Read-only fields; populated by the service, never sent in a request.
        self.provisioning_state = None
        self.instance_view = None
class VirtualMachineRunCommandInstanceView(msrest.serialization.Model):
    """Instance view (runtime status) of a virtual machine run command.

    :ivar execution_state: Current script execution status; one of "Unknown", "Pending",
     "Running", "Failed", "Succeeded", "TimedOut" or "Canceled".
    :vartype execution_state: str or ~azure.mgmt.compute.v2022_03_01.models.ExecutionState
    :ivar execution_message: Script configuration errors or execution messages.
    :vartype execution_message: str
    :ivar exit_code: Exit code returned by the script.
    :vartype exit_code: int
    :ivar output: Captured script output stream.
    :vartype output: str
    :ivar error: Captured script error stream.
    :vartype error: str
    :ivar start_time: Time at which the script started.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: Time at which the script finished.
    :vartype end_time: ~datetime.datetime
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        "execution_state": {"key": "executionState", "type": "str"},
        "execution_message": {"key": "executionMessage", "type": "str"},
        "exit_code": {"key": "exitCode", "type": "int"},
        "output": {"key": "output", "type": "str"},
        "error": {"key": "error", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
    }

    def __init__(
        self,
        *,
        execution_state: Optional[Union[str, "ExecutionState"]] = None,
        execution_message: Optional[str] = None,
        exit_code: Optional[int] = None,
        output: Optional[str] = None,
        error: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """
        :keyword execution_state: Current script execution status; one of "Unknown", "Pending",
         "Running", "Failed", "Succeeded", "TimedOut" or "Canceled".
        :paramtype execution_state: str or ~azure.mgmt.compute.v2022_03_01.models.ExecutionState
        :keyword execution_message: Script configuration errors or execution messages.
        :paramtype execution_message: str
        :keyword exit_code: Exit code returned by the script.
        :paramtype exit_code: int
        :keyword output: Captured script output stream.
        :paramtype output: str
        :keyword error: Captured script error stream.
        :paramtype error: str
        :keyword start_time: Time at which the script started.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: Time at which the script finished.
        :paramtype end_time: ~datetime.datetime
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        self.execution_state = execution_state
        self.execution_message = execution_message
        self.exit_code = exit_code
        self.output = output
        self.error = error
        self.start_time = start_time
        self.end_time = end_time
        self.statuses = statuses
class VirtualMachineRunCommandScriptSource(msrest.serialization.Model):
    """Script source for a run command: inline content, a download URI, or a built-in command id.

    :ivar script: Inline script content to be executed on the VM.
    :vartype script: str
    :ivar script_uri: Location from which the script is downloaded.
    :vartype script_uri: str
    :ivar command_id: commandId of a predefined built-in script.
    :vartype command_id: str
    """

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        "script": {"key": "script", "type": "str"},
        "script_uri": {"key": "scriptUri", "type": "str"},
        "command_id": {"key": "commandId", "type": "str"},
    }

    def __init__(
        self,
        *,
        script: Optional[str] = None,
        script_uri: Optional[str] = None,
        command_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword script: Inline script content to be executed on the VM.
        :paramtype script: str
        :keyword script_uri: Location from which the script is downloaded.
        :paramtype script_uri: str
        :keyword command_id: commandId of a predefined built-in script.
        :paramtype command_id: str
        """
        super().__init__(**kwargs)
        self.script = script
        self.script_uri = script_uri
        self.command_id = command_id
class VirtualMachineRunCommandsListResult(msrest.serialization.Model):
    """Response of the List run command operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The run commands in this page of results.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommand]
    :ivar next_link: URI to fetch the next page of run commands, if any.
    :vartype next_link: str
    """

    # "value" must be present on every outgoing payload.
    _validation = {
        "value": {"required": True},
    }

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineRunCommand]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: List["VirtualMachineRunCommand"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: Required. The run commands in this page of results.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommand]
        :keyword next_link: URI to fetch the next page of run commands, if any.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineRunCommandUpdate(UpdateResource):
    """Update payload for a Virtual Machine run command.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar source: Source of the run command script.
    :vartype source: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandScriptSource
    :ivar parameters: Parameters used by the script.
    :vartype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
    :ivar protected_parameters: Parameters used by the script that should not be logged.
    :vartype protected_parameters:
     list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
    :ivar async_execution: Optional. When true, provisioning completes as soon as the script
     starts rather than waiting for the script to finish.
    :vartype async_execution: bool
    :ivar run_as_user: User account on the VM under which the run command executes.
    :vartype run_as_user: str
    :ivar run_as_password: Password for that user account on the VM.
    :vartype run_as_password: str
    :ivar timeout_in_seconds: Timeout in seconds for executing the run command.
    :vartype timeout_in_seconds: int
    :ivar output_blob_uri: Azure storage blob to which the script output stream is uploaded.
    :vartype output_blob_uri: str
    :ivar error_blob_uri: Azure storage blob to which the script error stream is uploaded.
    :vartype error_blob_uri: str
    :ivar provisioning_state: Provisioning state; only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The virtual machine run command instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandInstanceView
    """

    # Server-populated attributes that must never be sent in a request.
    _validation = {
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'VirtualMachineRunCommandScriptSource'},
        'parameters': {'key': 'properties.parameters', 'type': '[RunCommandInputParameter]'},
        'protected_parameters': {'key': 'properties.protectedParameters', 'type': '[RunCommandInputParameter]'},
        'async_execution': {'key': 'properties.asyncExecution', 'type': 'bool'},
        'run_as_user': {'key': 'properties.runAsUser', 'type': 'str'},
        'run_as_password': {'key': 'properties.runAsPassword', 'type': 'str'},
        'timeout_in_seconds': {'key': 'properties.timeoutInSeconds', 'type': 'int'},
        'output_blob_uri': {'key': 'properties.outputBlobUri', 'type': 'str'},
        'error_blob_uri': {'key': 'properties.errorBlobUri', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineRunCommandInstanceView'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["VirtualMachineRunCommandScriptSource"] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        protected_parameters: Optional[List["RunCommandInputParameter"]] = None,
        async_execution: Optional[bool] = False,
        run_as_user: Optional[str] = None,
        run_as_password: Optional[str] = None,
        timeout_in_seconds: Optional[int] = None,
        output_blob_uri: Optional[str] = None,
        error_blob_uri: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword source: Source of the run command script.
        :paramtype source: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineRunCommandScriptSource
        :keyword parameters: Parameters used by the script.
        :paramtype parameters: list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
        :keyword protected_parameters: Parameters used by the script that should not be logged.
        :paramtype protected_parameters:
         list[~azure.mgmt.compute.v2022_03_01.models.RunCommandInputParameter]
        :keyword async_execution: Optional. When true, provisioning completes as soon as the
         script starts rather than waiting for the script to finish.
        :paramtype async_execution: bool
        :keyword run_as_user: User account on the VM under which the run command executes.
        :paramtype run_as_user: str
        :keyword run_as_password: Password for that user account on the VM.
        :paramtype run_as_password: str
        :keyword timeout_in_seconds: Timeout in seconds for executing the run command.
        :paramtype timeout_in_seconds: int
        :keyword output_blob_uri: Azure storage blob to which the script output stream is
         uploaded.
        :paramtype output_blob_uri: str
        :keyword error_blob_uri: Azure storage blob to which the script error stream is uploaded.
        :paramtype error_blob_uri: str
        """
        super().__init__(tags=tags, **kwargs)
        self.source = source
        self.parameters = parameters
        self.protected_parameters = protected_parameters
        self.async_execution = async_execution
        self.run_as_user = run_as_user
        self.run_as_password = run_as_password
        self.timeout_in_seconds = timeout_in_seconds
        self.output_blob_uri = output_blob_uri
        self.error_blob_uri = error_blob_uri
        # Read-only, server-populated fields start out unset.
        self.provisioning_state = None
        self.instance_view = None
class VirtualMachineScaleSet(Resource):
    """Describes a Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: The virtual machine scale set sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar plan: Marketplace image information used to create the virtual machine; only used for
     marketplace images. Before using a marketplace image from an API, the image must be enabled
     for programmatic use (in the Azure portal: **Want to deploy programmatically, Get Started
     ->**, fill in the required information, then **Save**).
    :vartype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
    :ivar identity: The identity of the virtual machine scale set, if configured.
    :vartype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIdentity
    :ivar zones: The virtual machine scale set zones. NOTE: Availability zones can only be set
     when you create the scale set.
    :vartype zones: list[str]
    :ivar extended_location: The extended location of the Virtual Machine Scale Set.
    :vartype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
    :ivar upgrade_policy: The upgrade policy.
    :vartype upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.UpgradePolicy
    :ivar automatic_repairs_policy: Policy for automatic repairs.
    :vartype automatic_repairs_policy:
     ~azure.mgmt.compute.v2022_03_01.models.AutomaticRepairsPolicy
    :ivar virtual_machine_profile: The virtual machine profile.
    :vartype virtual_machine_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMProfile
    :ivar provisioning_state: Provisioning state; only appears in the response.
    :vartype provisioning_state: str
    :ivar overprovision: Whether the Virtual Machine Scale Set should be overprovisioned.
    :vartype overprovision: bool
    :ivar do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled,
     extensions launch only on the requested number of VMs that are finally kept, ensuring
     extensions do not run on the extra overprovisioned VMs.
    :vartype do_not_run_extensions_on_overprovisioned_v_ms: bool
    :ivar unique_id: ID that uniquely identifies a Virtual Machine Scale Set.
    :vartype unique_id: str
    :ivar single_placement_group: When true, limits the scale set to a single placement group of
     max size 100 virtual machines. NOTE: may be changed from true to false, but never from false
     back to true.
    :vartype single_placement_group: bool
    :ivar zone_balance: Whether to force strictly even Virtual Machine distribution across zones
     in case of a zone outage. Can only be set when the scale set's zones property contains more
     than one zone; must not be set with zero or one zone.
    :vartype zone_balance: bool
    :ivar platform_fault_domain_count: Fault Domain count for each placement group.
    :vartype platform_fault_domain_count: int
    :ivar proximity_placement_group: Proximity placement group that the scale set should be
     assigned to. Minimum api-version: 2018-04-01.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar host_group: Dedicated host group that the virtual machine scale set resides in.
     Minimum api-version: 2020-06-01.
    :vartype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar additional_capabilities: Additional capabilities enabled or disabled on the Virtual
     Machines in the scale set, e.g. whether they can attach managed data disks with UltraSSD_LRS
     storage account type.
    :vartype additional_capabilities: ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
    :ivar scale_in_policy: Policies applied when scaling in Virtual Machines in the scale set.
    :vartype scale_in_policy: ~azure.mgmt.compute.v2022_03_01.models.ScaleInPolicy
    :ivar orchestration_mode: Orchestration mode for the virtual machine scale set. Possible
     values include: "Uniform", "Flexible".
    :vartype orchestration_mode: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationMode
    :ivar spot_restore_policy: Spot Restore properties for the virtual machine scale set.
    :vartype spot_restore_policy: ~azure.mgmt.compute.v2022_03_01.models.SpotRestorePolicy
    :ivar time_created: Time at which the Virtual Machine Scale Set resource was created.
     Minimum api-version: 2022-03-01.
    :vartype time_created: ~datetime.datetime
    """

    # Read-only attributes are server-populated; location is the only required input.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
        'time_created': {'readonly': True},
    }

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
        'orchestration_mode': {'key': 'properties.orchestrationMode', 'type': 'str'},
        'spot_restore_policy': {'key': 'properties.spotRestorePolicy', 'type': 'SpotRestorePolicy'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        zones: Optional[List[str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        zone_balance: Optional[bool] = None,
        platform_fault_domain_count: Optional[int] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        host_group: Optional["SubResource"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        orchestration_mode: Optional[Union[str, "OrchestrationMode"]] = None,
        spot_restore_policy: Optional["SpotRestorePolicy"] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: The virtual machine scale set sku.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword plan: Marketplace image information used to create the virtual machine; only
         used for marketplace images. Before using a marketplace image from an API, the image must
         be enabled for programmatic use (in the Azure portal: **Want to deploy programmatically,
         Get Started ->**, fill in the required information, then **Save**).
        :paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
        :keyword identity: The identity of the virtual machine scale set, if configured.
        :paramtype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIdentity
        :keyword zones: The virtual machine scale set zones. NOTE: Availability zones can only be
         set when you create the scale set.
        :paramtype zones: list[str]
        :keyword extended_location: The extended location of the Virtual Machine Scale Set.
        :paramtype extended_location: ~azure.mgmt.compute.v2022_03_01.models.ExtendedLocation
        :keyword upgrade_policy: The upgrade policy.
        :paramtype upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.UpgradePolicy
        :keyword automatic_repairs_policy: Policy for automatic repairs.
        :paramtype automatic_repairs_policy:
         ~azure.mgmt.compute.v2022_03_01.models.AutomaticRepairsPolicy
        :keyword virtual_machine_profile: The virtual machine profile.
        :paramtype virtual_machine_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMProfile
        :keyword overprovision: Whether the Virtual Machine Scale Set should be overprovisioned.
        :paramtype overprovision: bool
        :keyword do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled,
         extensions launch only on the requested number of VMs that are finally kept, ensuring
         extensions do not run on the extra overprovisioned VMs.
        :paramtype do_not_run_extensions_on_overprovisioned_v_ms: bool
        :keyword single_placement_group: When true, limits the scale set to a single placement
         group of max size 100 virtual machines. NOTE: may be changed from true to false, but
         never from false back to true.
        :paramtype single_placement_group: bool
        :keyword zone_balance: Whether to force strictly even Virtual Machine distribution across
         zones in case of a zone outage. Can only be set when the scale set's zones property
         contains more than one zone; must not be set with zero or one zone.
        :paramtype zone_balance: bool
        :keyword platform_fault_domain_count: Fault Domain count for each placement group.
        :paramtype platform_fault_domain_count: int
        :keyword proximity_placement_group: Proximity placement group that the scale set should
         be assigned to. Minimum api-version: 2018-04-01.
        :paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword host_group: Dedicated host group that the virtual machine scale set resides in.
         Minimum api-version: 2020-06-01.
        :paramtype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword additional_capabilities: Additional capabilities enabled or disabled on the
         Virtual Machines in the scale set, e.g. whether they can attach managed data disks with
         UltraSSD_LRS storage account type.
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
        :keyword scale_in_policy: Policies applied when scaling in Virtual Machines in the scale
         set.
        :paramtype scale_in_policy: ~azure.mgmt.compute.v2022_03_01.models.ScaleInPolicy
        :keyword orchestration_mode: Orchestration mode for the virtual machine scale set.
         Possible values include: "Uniform", "Flexible".
        :paramtype orchestration_mode: str or ~azure.mgmt.compute.v2022_03_01.models.OrchestrationMode
        :keyword spot_restore_policy: Spot Restore properties for the virtual machine scale set.
        :paramtype spot_restore_policy: ~azure.mgmt.compute.v2022_03_01.models.SpotRestorePolicy
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.extended_location = extended_location
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        # Read-only, server-populated field starts out unset.
        self.provisioning_state = None
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        # Read-only, server-populated field starts out unset.
        self.unique_id = None
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.proximity_placement_group = proximity_placement_group
        self.host_group = host_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.orchestration_mode = orchestration_mode
        self.spot_restore_policy = spot_restore_policy
        # Read-only, server-populated field starts out unset.
        self.time_created = None
class VirtualMachineScaleSetDataDisk(msrest.serialization.Model):
    """Describes a virtual machine scale set data disk.

    All required parameters must be populated in order to send to Azure.

    :ivar name: The disk name.
    :vartype name: str
    :ivar lun: Required. Logical unit number of the data disk; identifies data disks within the
     VM and therefore must be unique per data disk attached to a VM.
    :vartype lun: int
    :ivar caching: Caching requirements. Possible values are **None**, **ReadOnly** and
     **ReadWrite**; the default is **None for Standard storage, ReadOnly for Premium storage**.
     Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Whether writeAccelerator should be enabled or disabled on
     the disk.
    :vartype write_accelerator_enabled: bool
    :ivar create_option: Required. The create option. Possible values include: "FromImage",
     "Empty", "Attach".
    :vartype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
    :ivar disk_size_gb: Size of an empty data disk in gigabytes; can be used to overwrite the
     size of the disk in a virtual machine image. Cannot exceed 1023 GB.
    :vartype disk_size_gb: int
    :ivar managed_disk: The managed disk parameters.
    :vartype managed_disk:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    :ivar disk_iops_read_write: Read-Write IOPS for the managed disk. Use only when
     StorageAccountType is UltraSSD_LRS; when omitted, a default is assigned based on diskSizeGB.
    :vartype disk_iops_read_write: long
    :ivar disk_m_bps_read_write: Bandwidth in MB per second for the managed disk. Use only when
     StorageAccountType is UltraSSD_LRS; when omitted, a default is assigned based on diskSizeGB.
    :vartype disk_m_bps_read_write: long
    :ivar delete_option: Whether the data disk should be deleted or detached upon VMSS Flex
     deletion (only available for VMSS with Flexible OrchestrationMode). **Delete**: the data
     disk is deleted when the VMSS Flex VM is deleted. **Detach**: the data disk is retained
     after the VMSS Flex VM is deleted. Defaults to **Delete**. Possible values include:
     "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """

    # lun and create_option are the only mandatory inputs.
    _validation = {
        "lun": {"required": True},
        "create_option": {"required": True},
    }

    # Maps attribute names to wire-format keys and msrest serialization types.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "lun": {"key": "lun", "type": "int"},
        "caching": {"key": "caching", "type": "str"},
        "write_accelerator_enabled": {"key": "writeAcceleratorEnabled", "type": "bool"},
        "create_option": {"key": "createOption", "type": "str"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "managed_disk": {"key": "managedDisk", "type": "VirtualMachineScaleSetManagedDiskParameters"},
        "disk_iops_read_write": {"key": "diskIOPSReadWrite", "type": "long"},
        "disk_m_bps_read_write": {"key": "diskMBpsReadWrite", "type": "long"},
        "delete_option": {"key": "deleteOption", "type": "str"},
    }

    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """
        :keyword name: The disk name.
        :paramtype name: str
        :keyword lun: Required. Logical unit number of the data disk; identifies data disks
         within the VM and therefore must be unique per data disk attached to a VM.
        :paramtype lun: int
        :keyword caching: Caching requirements. Possible values are **None**, **ReadOnly** and
         **ReadWrite**; the default is **None for Standard storage, ReadOnly for Premium
         storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword write_accelerator_enabled: Whether writeAccelerator should be enabled or
         disabled on the disk.
        :paramtype write_accelerator_enabled: bool
        :keyword create_option: Required. The create option. Possible values include:
         "FromImage", "Empty", "Attach".
        :paramtype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
        :keyword disk_size_gb: Size of an empty data disk in gigabytes; can be used to overwrite
         the size of the disk in a virtual machine image. Cannot exceed 1023 GB.
        :paramtype disk_size_gb: int
        :keyword managed_disk: The managed disk parameters.
        :paramtype managed_disk:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
        :keyword disk_iops_read_write: Read-Write IOPS for the managed disk. Use only when
         StorageAccountType is UltraSSD_LRS; when omitted, a default is assigned based on
         diskSizeGB.
        :paramtype disk_iops_read_write: long
        :keyword disk_m_bps_read_write: Bandwidth in MB per second for the managed disk. Use only
         when StorageAccountType is UltraSSD_LRS; when omitted, a default is assigned based on
         diskSizeGB.
        :paramtype disk_m_bps_read_write: long
        :keyword delete_option: Whether the data disk should be deleted or detached upon VMSS
         Flex deletion (only available for VMSS with Flexible OrchestrationMode). **Delete**: the
         data disk is deleted when the VMSS Flex VM is deleted. **Detach**: the data disk is
         retained after the VMSS Flex VM is deleted. Defaults to **Delete**. Possible values
         include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
        """
        super().__init__(**kwargs)
        self.name = name
        self.lun = lun
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
        self.delete_option = delete_option
class VirtualMachineScaleSetExtension(SubResourceReadOnly):
    """Describes a Virtual Machine Scale Set Extension.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Name of the extension.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar force_update_tag: When set to a value different from the previous one, forces the
     extension handler to re-run even if the extension configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: Name of the extension handler publisher.
    :vartype publisher: str
    :ivar type_properties_type: The extension type; an example is "CustomScriptExtension".
    :vartype type_properties_type: str
    :ivar type_handler_version: Version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Whether a newer minor version may be used at deployment
     time. Once deployed, the extension will not upgrade minor versions unless redeployed, even
     with this property set to true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Whether the platform automatically upgrades the extension
     when a newer version becomes available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can carry either protectedSettings or
     protectedSettingsFromKeyVault, or no protected settings at all.
    :vartype protected_settings: any
    :ivar provisioning_state: The provisioning state; only appears in the response.
    :vartype provisioning_state: str
    :ivar provision_after_extensions: Names of extensions after which this extension needs to be
     provisioned.
    :vartype provision_after_extensions: list[str]
    :ivar suppress_failures: Whether failures stemming from the extension are suppressed
     (operational failures such as not connecting to the VM are never suppressed regardless of
     this value). Defaults to false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: Protected settings passed by reference and consumed
     from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    # Server-populated properties that must never be sent in a request.
    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    # Maps Python attribute names onto the flattened REST payload paths.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.publisher", "type": "str"},
        "type_properties_type": {"key": "properties.type", "type": "str"},
        "type_handler_version": {"key": "properties.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"},
        "enable_automatic_upgrade": {"key": "properties.enableAutomaticUpgrade", "type": "bool"},
        "settings": {"key": "properties.settings", "type": "object"},
        "protected_settings": {"key": "properties.protectedSettings", "type": "object"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "provision_after_extensions": {"key": "properties.provisionAfterExtensions", "type": "[str]"},
        "suppress_failures": {"key": "properties.suppressFailures", "type": "bool"},
        "protected_settings_from_key_vault": {"key": "properties.protectedSettingsFromKeyVault", "type": "object"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        provision_after_extensions: Optional[List[str]] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ) -> None:
        """Keyword arguments mirror the writable attributes documented on the class above;
        see the corresponding ``:ivar`` entries for their semantics and types."""
        super().__init__(**kwargs)
        self.name = name
        # Read-only fields start as None and are filled in by the service on responses.
        self.type = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None
        self.provision_after_extensions = provision_after_extensions
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetExtensionListResult(msrest.serialization.Model):
    """The List VM scale set extension operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The VM scale set extensions in this page.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtension]
    :ivar next_link: URI of the next page of VM scale set extensions; call ListNext() with it to
     fetch that page.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineScaleSetExtension]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: List["VirtualMachineScaleSetExtension"], next_link: Optional[str] = None, **kwargs
    ) -> None:
        """
        :keyword value: Required. The VM scale set extensions in this page.
        :paramtype value:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtension]
        :keyword next_link: URI of the next page; call ListNext() with it to fetch that page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetExtensionProfile(msrest.serialization.Model):
    """Describes a virtual machine scale set extension profile.

    :ivar extensions: The virtual machine scale set child extension resources.
    :vartype extensions:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtension]
    :ivar extensions_time_budget: Time allotted for all extensions to start, as an ISO 8601
     duration between 15 and 120 minutes (inclusive). Defaults to 90 minutes (PT1H30M).
     :code:`<br>`:code:`<br>` Minimum api-version: 2020-06-01.
    :vartype extensions_time_budget: str
    """

    _attribute_map = {
        "extensions": {"key": "extensions", "type": "[VirtualMachineScaleSetExtension]"},
        "extensions_time_budget": {"key": "extensionsTimeBudget", "type": "str"},
    }

    def __init__(
        self,
        *,
        extensions: Optional[List["VirtualMachineScaleSetExtension"]] = None,
        extensions_time_budget: Optional[str] = None,
        **kwargs
    ) -> None:
        """
        :keyword extensions: The virtual machine scale set child extension resources.
        :paramtype extensions:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtension]
        :keyword extensions_time_budget: Time allotted for all extensions to start, as an ISO 8601
         duration between 15 and 120 minutes (inclusive). Defaults to 90 minutes (PT1H30M).
         :code:`<br>`:code:`<br>` Minimum api-version: 2020-06-01.
        :paramtype extensions_time_budget: str
        """
        super().__init__(**kwargs)
        self.extensions = extensions
        self.extensions_time_budget = extensions_time_budget
class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly):
    """Describes a Virtual Machine Scale Set Extension.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Name of the extension.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar force_update_tag: When set to a value different from the previous one, forces the
     extension handler to re-run even if the extension configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: Name of the extension handler publisher.
    :vartype publisher: str
    :ivar type_properties_type: The extension type; an example is "CustomScriptExtension".
    :vartype type_properties_type: str
    :ivar type_handler_version: Version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Whether a newer minor version may be used at deployment
     time. Once deployed, the extension will not upgrade minor versions unless redeployed, even
     with this property set to true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Whether the platform automatically upgrades the extension
     when a newer version becomes available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can carry either protectedSettings or
     protectedSettingsFromKeyVault, or no protected settings at all.
    :vartype protected_settings: any
    :ivar provisioning_state: The provisioning state; only appears in the response.
    :vartype provisioning_state: str
    :ivar provision_after_extensions: Names of extensions after which this extension needs to be
     provisioned.
    :vartype provision_after_extensions: list[str]
    :ivar suppress_failures: Whether failures stemming from the extension are suppressed
     (operational failures such as not connecting to the VM are never suppressed regardless of
     this value). Defaults to false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: Protected settings passed by reference and consumed
     from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    # Unlike VirtualMachineScaleSetExtension, ``name`` is also read-only on the update shape.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    # Maps Python attribute names onto the flattened REST payload paths.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
        "publisher": {"key": "properties.publisher", "type": "str"},
        "type_properties_type": {"key": "properties.type", "type": "str"},
        "type_handler_version": {"key": "properties.typeHandlerVersion", "type": "str"},
        "auto_upgrade_minor_version": {"key": "properties.autoUpgradeMinorVersion", "type": "bool"},
        "enable_automatic_upgrade": {"key": "properties.enableAutomaticUpgrade", "type": "bool"},
        "settings": {"key": "properties.settings", "type": "object"},
        "protected_settings": {"key": "properties.protectedSettings", "type": "object"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "provision_after_extensions": {"key": "properties.provisionAfterExtensions", "type": "[str]"},
        "suppress_failures": {"key": "properties.suppressFailures", "type": "bool"},
        "protected_settings_from_key_vault": {"key": "properties.protectedSettingsFromKeyVault", "type": "object"},
    }

    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        provision_after_extensions: Optional[List[str]] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ) -> None:
        """Keyword arguments mirror the writable attributes documented on the class above;
        see the corresponding ``:ivar`` entries for their semantics and types."""
        super().__init__(**kwargs)
        # Read-only fields start as None and are filled in by the service on responses.
        self.name = None
        self.type = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None
        self.provision_after_extensions = provision_after_extensions
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetHardwareProfile(msrest.serialization.Model):
    """Specifies the hardware settings for the virtual machine scale set.

    :ivar vm_size_properties: Properties for customizing the size of the virtual machine.
     Minimum api-version: 2022-03-01. :code:`<br>`:code:`<br>` Please follow the instructions in
     `VM Customization <https://aka.ms/vmcustomization>`_ for more details.
    :vartype vm_size_properties: ~azure.mgmt.compute.v2022_03_01.models.VMSizeProperties
    """

    _attribute_map = {
        "vm_size_properties": {"key": "vmSizeProperties", "type": "VMSizeProperties"},
    }

    def __init__(self, *, vm_size_properties: Optional["VMSizeProperties"] = None, **kwargs) -> None:
        """
        :keyword vm_size_properties: Properties for customizing the size of the virtual machine.
         Minimum api-version: 2022-03-01. :code:`<br>`:code:`<br>` Please follow the instructions
         in `VM Customization <https://aka.ms/vmcustomization>`_ for more details.
        :paramtype vm_size_properties: ~azure.mgmt.compute.v2022_03_01.models.VMSizeProperties
        """
        super().__init__(**kwargs)
        self.vm_size_properties = vm_size_properties
class VirtualMachineScaleSetIdentity(msrest.serialization.Model):
    """Identity for the virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: Principal id of the scale set identity; only provided for a system
     assigned identity.
    :vartype principal_id: str
    :ivar tenant_id: Tenant id associated with the scale set; only provided for a system assigned
     identity.
    :vartype tenant_id: str
    :ivar type: The type of identity used for the scale set. 'SystemAssigned, UserAssigned'
     includes both an implicitly created identity and a set of user assigned identities; 'None'
     removes any identities from the scale set. Possible values include: "SystemAssigned",
     "UserAssigned", "SystemAssigned, UserAssigned", "None".
    :vartype type: str or ~azure.mgmt.compute.v2022_03_01.models.ResourceIdentityType
    :ivar user_assigned_identities: User identities associated with the scale set, keyed by ARM
     resource id in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
    :vartype user_assigned_identities: dict[str,
     ~azure.mgmt.compute.v2022_03_01.models.UserAssignedIdentitiesValue]
    """

    _validation = {
        "principal_id": {"readonly": True},
        "tenant_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "tenant_id": {"key": "tenantId", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentitiesValue}"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "ResourceIdentityType"]] = None,
        user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None,
        **kwargs
    ) -> None:
        """
        :keyword type: The type of identity used for the scale set. 'SystemAssigned,
         UserAssigned' includes both an implicitly created identity and a set of user assigned
         identities; 'None' removes any identities from the scale set. Possible values include:
         "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
        :paramtype type: str or ~azure.mgmt.compute.v2022_03_01.models.ResourceIdentityType
        :keyword user_assigned_identities: User identities associated with the scale set, keyed
         by ARM resource id in the form:
         '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        :paramtype user_assigned_identities: dict[str,
         ~azure.mgmt.compute.v2022_03_01.models.UserAssignedIdentitiesValue]
        """
        super().__init__(**kwargs)
        # principal_id / tenant_id are server-populated and never sent in a request.
        self.principal_id = None
        self.tenant_id = None
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineScaleSetInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar virtual_machine: Instance view status summary for the scale set's virtual machines.
    :vartype virtual_machine:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetInstanceViewStatusesSummary
    :ivar extensions: The extensions information.
    :vartype extensions:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMExtensionsSummary]
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar orchestration_services: The orchestration services information.
    :vartype orchestration_services:
     list[~azure.mgmt.compute.v2022_03_01.models.OrchestrationServiceSummary]
    """

    _validation = {
        "virtual_machine": {"readonly": True},
        "extensions": {"readonly": True},
        "orchestration_services": {"readonly": True},
    }

    _attribute_map = {
        "virtual_machine": {"key": "virtualMachine", "type": "VirtualMachineScaleSetInstanceViewStatusesSummary"},
        "extensions": {"key": "extensions", "type": "[VirtualMachineScaleSetVMExtensionsSummary]"},
        "statuses": {"key": "statuses", "type": "[InstanceViewStatus]"},
        "orchestration_services": {"key": "orchestrationServices", "type": "[OrchestrationServiceSummary]"},
    }

    def __init__(self, *, statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs) -> None:
        """
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        """
        super().__init__(**kwargs)
        # Only ``statuses`` is client-settable; the rest are server-populated.
        self.virtual_machine = None
        self.extensions = None
        self.statuses = statuses
        self.orchestration_services = None
class VirtualMachineScaleSetInstanceViewStatusesSummary(msrest.serialization.Model):
    """Instance view statuses summary for virtual machines of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar statuses_summary: The extensions information.
    :vartype statuses_summary:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineStatusCodeCount]
    """

    _validation = {
        "statuses_summary": {"readonly": True},
    }

    _attribute_map = {
        "statuses_summary": {"key": "statusesSummary", "type": "[VirtualMachineStatusCodeCount]"},
    }

    def __init__(self, **kwargs) -> None:
        """All attributes are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.statuses_summary = None
class VirtualMachineScaleSetIPConfiguration(SubResource):
    """Describes a virtual machine scale set network profile's IP configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Required. The IP configuration name.
    :vartype name: str
    :ivar subnet: Identifier of the subnet.
    :vartype subnet: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    :ivar primary: Marks the primary network interface when the virtual machine has more than 1
     network interface.
    :vartype primary: bool
    :ivar public_ip_address_configuration: The publicIPAddressConfiguration.
    :vartype public_ip_address_configuration:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetPublicIPAddressConfiguration
    :ivar private_ip_address_version: Available from Api-Version 2017-03-30 onwards; whether this
     ipconfiguration is IPv4 or IPv6. Defaults to IPv4. Possible values include: "IPv4", "IPv6".
    :vartype private_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersion
    :ivar application_gateway_backend_address_pools: References to backend address pools of
     application gateways. A scale set can reference pools of multiple application gateways;
     multiple scale sets cannot use the same application gateway.
    :vartype application_gateway_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar application_security_groups: References to application security groups.
    :vartype application_security_groups: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar load_balancer_backend_address_pools: References to backend address pools of load
     balancers. A scale set can reference pools of one public and one internal load balancer;
     multiple scale sets cannot use the same basic sku load balancer.
    :vartype load_balancer_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar load_balancer_inbound_nat_pools: References to inbound Nat pools of the load balancers.
     A scale set can reference inbound nat pools of one public and one internal load balancer;
     multiple scale sets cannot use the same basic sku load balancer.
    :vartype load_balancer_inbound_nat_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    """

    _validation = {
        "name": {"required": True},
    }

    # Maps Python attribute names onto the flattened REST payload paths.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "subnet": {"key": "properties.subnet", "type": "ApiEntityReference"},
        "primary": {"key": "properties.primary", "type": "bool"},
        "public_ip_address_configuration": {"key": "properties.publicIPAddressConfiguration", "type": "VirtualMachineScaleSetPublicIPAddressConfiguration"},
        "private_ip_address_version": {"key": "properties.privateIPAddressVersion", "type": "str"},
        "application_gateway_backend_address_pools": {"key": "properties.applicationGatewayBackendAddressPools", "type": "[SubResource]"},
        "application_security_groups": {"key": "properties.applicationSecurityGroups", "type": "[SubResource]"},
        "load_balancer_backend_address_pools": {"key": "properties.loadBalancerBackendAddressPools", "type": "[SubResource]"},
        "load_balancer_inbound_nat_pools": {"key": "properties.loadBalancerInboundNatPools", "type": "[SubResource]"},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetPublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ) -> None:
        """Keyword arguments mirror the attributes documented on the class above; see the
        corresponding ``:ivar`` entries for their semantics and types. ``name`` is required."""
        # ``id`` is handled by the SubResource base class.
        super().__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetIpTag(msrest.serialization.Model):
    """Contains the IP tag associated with the public IP address.

    :ivar ip_tag_type: IP tag type. Example: FirstPartyUsage.
    :vartype ip_tag_type: str
    :ivar tag: IP tag associated with the public IP. Example: SQL, Storage etc.
    :vartype tag: str
    """

    _attribute_map = {
        "ip_tag_type": {"key": "ipTagType", "type": "str"},
        "tag": {"key": "tag", "type": "str"},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs) -> None:
        """
        :keyword ip_tag_type: IP tag type. Example: FirstPartyUsage.
        :paramtype ip_tag_type: str
        :keyword tag: IP tag associated with the public IP. Example: SQL, Storage etc.
        :paramtype tag: str
        """
        super().__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
class VirtualMachineScaleSetListOSUpgradeHistory(msrest.serialization.Model):
    """List of Virtual Machine Scale Set OS Upgrade History operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. OS upgrades performed on the virtual machine scale set.
    :vartype value:
     list[~azure.mgmt.compute.v2022_03_01.models.UpgradeOperationHistoricalStatusInfo]
    :ivar next_link: URI of the next page of OS Upgrade History; call ListNext() with it to
     fetch that page.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[UpgradeOperationHistoricalStatusInfo]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: List["UpgradeOperationHistoricalStatusInfo"], next_link: Optional[str] = None, **kwargs
    ) -> None:
        """
        :keyword value: Required. OS upgrades performed on the virtual machine scale set.
        :paramtype value:
         list[~azure.mgmt.compute.v2022_03_01.models.UpgradeOperationHistoricalStatusInfo]
        :keyword next_link: URI of the next page; call ListNext() with it to fetch that page.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListResult(msrest.serialization.Model):
    """One page of results from listing virtual machine scale sets.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The virtual machine scale sets on this page.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSet]
    :ivar next_link: URI of the next page of scale sets; pass it to ListNext() to continue
     paging.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineScaleSet]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The virtual machine scale sets on this page.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSet]
        :keyword next_link: URI of the next page of scale sets; pass it to ListNext() to
         continue paging.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetListSkusResult(msrest.serialization.Model):
    """One page of results from the Virtual Machine Scale Set List Skus operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The skus available for the virtual machine scale set.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetSku]
    :ivar next_link: URI of the next page of skus; pass it to ListNext() to continue paging.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineScaleSetSku]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetSku"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The skus available for the virtual machine scale set.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetSku]
        :keyword next_link: URI of the next page of skus; pass it to ListNext() to continue
         paging.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetListWithLinkResult(msrest.serialization.Model):
    """One page of results from listing virtual machine scale sets (with-link variant).

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The virtual machine scale sets on this page.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSet]
    :ivar next_link: URI of the next page of scale sets; pass it to ListNext() to continue
     paging.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualMachineScaleSet]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSet"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The virtual machine scale sets on this page.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSet]
        :keyword next_link: URI of the next page of scale sets; pass it to ListNext() to
         continue paging.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetManagedDiskParameters(msrest.serialization.Model):
    """Parameters of a scale set managed disk.

    :ivar storage_account_type: Storage account type for the managed disk. NOTE:
     UltraSSD_LRS can only be used with data disks, not with the OS disk. Possible values
     include: "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS",
     "Premium_ZRS", "StandardSSD_ZRS", "PremiumV2_LRS".
    :vartype storage_account_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.StorageAccountTypes
    :ivar disk_encryption_set: Customer-managed disk encryption set resource id for the
     managed disk.
    :vartype disk_encryption_set:
     ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
    :ivar security_profile: Security profile for the managed disk.
    :vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.VMDiskSecurityProfile
    """

    _attribute_map = {
        "storage_account_type": {"key": "storageAccountType", "type": "str"},
        "disk_encryption_set": {"key": "diskEncryptionSet", "type": "DiskEncryptionSetParameters"},
        "security_profile": {"key": "securityProfile", "type": "VMDiskSecurityProfile"},
    }

    def __init__(
        self,
        *,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        security_profile: Optional["VMDiskSecurityProfile"] = None,
        **kwargs
    ):
        """
        :keyword storage_account_type: Storage account type for the managed disk. NOTE:
         UltraSSD_LRS can only be used with data disks, not with the OS disk. Possible values
         include: "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS",
         "Premium_ZRS", "StandardSSD_ZRS", "PremiumV2_LRS".
        :paramtype storage_account_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.StorageAccountTypes
        :keyword disk_encryption_set: Customer-managed disk encryption set resource id for
         the managed disk.
        :paramtype disk_encryption_set:
         ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
        :keyword security_profile: Security profile for the managed disk.
        :paramtype security_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VMDiskSecurityProfile
        """
        super().__init__(**kwargs)
        # Plain pass-through storage; serialization is driven by _attribute_map.
        self.security_profile = security_profile
        self.disk_encryption_set = disk_encryption_set
        self.storage_account_type = storage_account_type
class VirtualMachineScaleSetNetworkConfiguration(SubResource):
    """A network configuration in a scale set network profile.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Required. The network configuration name.
    :vartype name: str
    :ivar primary: Whether this is the primary network interface when the virtual machine
     has more than one network interface.
    :vartype primary: bool
    :ivar enable_accelerated_networking: Whether the network interface is accelerated
     networking-enabled.
    :vartype enable_accelerated_networking: bool
    :ivar enable_fpga: Whether the network interface is FPGA networking-enabled.
    :vartype enable_fpga: bool
    :ivar network_security_group: The network security group.
    :vartype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar dns_settings: DNS settings applied on the network interfaces.
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
    :ivar ip_configurations: IP configurations of the network interface.
    :vartype ip_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIPConfiguration]
    :ivar enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
    :vartype enable_ip_forwarding: bool
    :ivar delete_option: What happens to the network interface when the VM is deleted.
     Possible values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    """

    _validation = {
        "name": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "primary": {"key": "properties.primary", "type": "bool"},
        "enable_accelerated_networking": {"key": "properties.enableAcceleratedNetworking", "type": "bool"},
        "enable_fpga": {"key": "properties.enableFpga", "type": "bool"},
        "network_security_group": {"key": "properties.networkSecurityGroup", "type": "SubResource"},
        "dns_settings": {"key": "properties.dnsSettings", "type": "VirtualMachineScaleSetNetworkConfigurationDnsSettings"},
        "ip_configurations": {"key": "properties.ipConfigurations", "type": "[VirtualMachineScaleSetIPConfiguration]"},
        "enable_ip_forwarding": {"key": "properties.enableIPForwarding", "type": "bool"},
        "delete_option": {"key": "properties.deleteOption", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword name: Required. The network configuration name.
        :paramtype name: str
        :keyword primary: Whether this is the primary network interface when the virtual
         machine has more than one network interface.
        :paramtype primary: bool
        :keyword enable_accelerated_networking: Whether the network interface is
         accelerated networking-enabled.
        :paramtype enable_accelerated_networking: bool
        :keyword enable_fpga: Whether the network interface is FPGA networking-enabled.
        :paramtype enable_fpga: bool
        :keyword network_security_group: The network security group.
        :paramtype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword dns_settings: DNS settings applied on the network interfaces.
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
        :keyword ip_configurations: IP configurations of the network interface.
        :paramtype ip_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIPConfiguration]
        :keyword enable_ip_forwarding: Whether IP forwarding is enabled on this NIC.
        :paramtype enable_ip_forwarding: bool
        :keyword delete_option: What happens to the network interface when the VM is
         deleted. Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        """
        # ``id`` is handled by the SubResource base class; everything else lives under
        # the "properties" envelope (see _attribute_map).
        super().__init__(id=id, **kwargs)
        self.delete_option = delete_option
        self.enable_ip_forwarding = enable_ip_forwarding
        self.ip_configurations = ip_configurations
        self.dns_settings = dns_settings
        self.network_security_group = network_security_group
        self.enable_fpga = enable_fpga
        self.enable_accelerated_networking = enable_accelerated_networking
        self.primary = primary
        self.name = name
class VirtualMachineScaleSetNetworkConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings of a scale set network configuration.

    :ivar dns_servers: IP addresses of the DNS servers.
    :vartype dns_servers: list[str]
    """

    _attribute_map = {
        "dns_servers": {"key": "dnsServers", "type": "[str]"},
    }

    def __init__(self, *, dns_servers: Optional[List[str]] = None, **kwargs):
        """
        :keyword dns_servers: IP addresses of the DNS servers.
        :paramtype dns_servers: list[str]
        """
        super().__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineScaleSetNetworkProfile(msrest.serialization.Model):
    """Network profile of a virtual machine scale set.

    :ivar health_probe: Reference to a load balancer probe used to determine the health of
     an instance in the scale set. The reference is of the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
    :vartype health_probe: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    :ivar network_interface_configurations: The network configurations.
    :vartype network_interface_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
    :ivar network_api_version: Microsoft.Network API version used when creating networking
     resources in the Network Interface Configurations for a scale set with orchestration
     mode 'Flexible'. Possible values include: "2020-11-01".
    :vartype network_api_version: str or ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
    """

    _attribute_map = {
        "health_probe": {"key": "healthProbe", "type": "ApiEntityReference"},
        "network_interface_configurations": {"key": "networkInterfaceConfigurations", "type": "[VirtualMachineScaleSetNetworkConfiguration]"},
        "network_api_version": {"key": "networkApiVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        **kwargs
    ):
        """
        :keyword health_probe: Reference to a load balancer probe used to determine the
         health of an instance in the scale set. The reference is of the form:
         '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
        :paramtype health_probe: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        :keyword network_interface_configurations: The network configurations.
        :paramtype network_interface_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
        :keyword network_api_version: Microsoft.Network API version used when creating
         networking resources in the Network Interface Configurations for a scale set with
         orchestration mode 'Flexible'. Possible values include: "2020-11-01".
        :paramtype network_api_version: str or
         ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
        """
        super().__init__(**kwargs)
        self.network_api_version = network_api_version
        self.network_interface_configurations = network_interface_configurations
        self.health_probe = health_probe
class VirtualMachineScaleSetOSDisk(msrest.serialization.Model):
    """Operating system disk of a virtual machine scale set.

    All required parameters must be populated in order to send to Azure.

    :ivar name: The disk name.
    :vartype name: str
    :ivar caching: Caching requirements. Possible values are **None**, **ReadOnly**,
     **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**.
     Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Whether writeAccelerator should be enabled or
     disabled on the disk.
    :vartype write_accelerator_enabled: bool
    :ivar create_option: Required. How the virtual machines in the scale set should be
     created. The only allowed value is **FromImage**, used when creating the virtual
     machine from an image. For a platform image also supply the imageReference element;
     for a marketplace image also supply the plan element. Possible values include:
     "FromImage", "Empty", "Attach".
    :vartype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
    :ivar diff_disk_settings: Ephemeral disk settings for the operating system disk used
     by the scale set.
    :vartype diff_disk_settings: ~azure.mgmt.compute.v2022_03_01.models.DiffDiskSettings
    :ivar disk_size_gb: Size of the operating system disk in gigabytes; can be used to
     overwrite the size of the disk in a virtual machine image. This value cannot be
     larger than 1023 GB.
    :vartype disk_size_gb: int
    :ivar os_type: Type of the OS included in the disk, when creating a VM from a
     user-image or a specialized VHD. Possible values are **Windows** and **Linux**.
     Possible values include: "Windows", "Linux".
    :vartype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
    :ivar image: Information about the unmanaged user image to base the scale set on.
    :vartype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar vhd_containers: Container urls used to store operating system disks for the
     scale set.
    :vartype vhd_containers: list[str]
    :ivar managed_disk: The managed disk parameters.
    :vartype managed_disk:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    :ivar delete_option: Whether the OS disk should be deleted or detached upon VMSS Flex
     deletion (available for VMSS with Flexible OrchestrationMode only). **Delete**: the
     OS disk is deleted when the VMSS Flex VM is deleted. **Detach**: the OS disk is
     retained. The default is **Delete**; for an Ephemeral OS disk the value is forced to
     **Delete** and cannot be changed. Possible values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """

    _validation = {
        "create_option": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "caching": {"key": "caching", "type": "str"},
        "write_accelerator_enabled": {"key": "writeAcceleratorEnabled", "type": "bool"},
        "create_option": {"key": "createOption", "type": "str"},
        "diff_disk_settings": {"key": "diffDiskSettings", "type": "DiffDiskSettings"},
        "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
        "os_type": {"key": "osType", "type": "str"},
        "image": {"key": "image", "type": "VirtualHardDisk"},
        "vhd_containers": {"key": "vhdContainers", "type": "[str]"},
        "managed_disk": {"key": "managedDisk", "type": "VirtualMachineScaleSetManagedDiskParameters"},
        "delete_option": {"key": "deleteOption", "type": "str"},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """
        :keyword name: The disk name.
        :paramtype name: str
        :keyword caching: Caching requirements. Possible values are **None**, **ReadOnly**,
         **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium
         storage**. Possible values include: "None", "ReadOnly", "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword write_accelerator_enabled: Whether writeAccelerator should be enabled or
         disabled on the disk.
        :paramtype write_accelerator_enabled: bool
        :keyword create_option: Required. How the virtual machines in the scale set should
         be created. The only allowed value is **FromImage**, used when creating the
         virtual machine from an image. For a platform image also supply the
         imageReference element; for a marketplace image also supply the plan element.
         Possible values include: "FromImage", "Empty", "Attach".
        :paramtype create_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
        :keyword diff_disk_settings: Ephemeral disk settings for the operating system disk
         used by the scale set.
        :paramtype diff_disk_settings: ~azure.mgmt.compute.v2022_03_01.models.DiffDiskSettings
        :keyword disk_size_gb: Size of the operating system disk in gigabytes; can be used
         to overwrite the size of the disk in a virtual machine image. This value cannot
         be larger than 1023 GB.
        :paramtype disk_size_gb: int
        :keyword os_type: Type of the OS included in the disk, when creating a VM from a
         user-image or a specialized VHD. Possible values are **Windows** and **Linux**.
         Possible values include: "Windows", "Linux".
        :paramtype os_type: str or ~azure.mgmt.compute.v2022_03_01.models.OperatingSystemTypes
        :keyword image: Information about the unmanaged user image to base the scale set
         on.
        :paramtype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword vhd_containers: Container urls used to store operating system disks for
         the scale set.
        :paramtype vhd_containers: list[str]
        :keyword managed_disk: The managed disk parameters.
        :paramtype managed_disk:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
        :keyword delete_option: Whether the OS disk should be deleted or detached upon
         VMSS Flex deletion (available for VMSS with Flexible OrchestrationMode only).
         **Delete**: the OS disk is deleted when the VMSS Flex VM is deleted. **Detach**:
         the OS disk is retained. The default is **Delete**; for an Ephemeral OS disk the
         value is forced to **Delete** and cannot be changed. Possible values include:
         "Delete", "Detach".
        :paramtype delete_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
        """
        super().__init__(**kwargs)
        # Only ``create_option`` is validated as required; the rest are optional
        # pass-throughs serialized according to _attribute_map.
        self.delete_option = delete_option
        self.managed_disk = managed_disk
        self.vhd_containers = vhd_containers
        self.image = image
        self.os_type = os_type
        self.disk_size_gb = disk_size_gb
        self.diff_disk_settings = diff_disk_settings
        self.create_option = create_option
        self.write_accelerator_enabled = write_accelerator_enabled
        self.caching = caching
        self.name = name
class VirtualMachineScaleSetOSProfile(msrest.serialization.Model):
    """OS profile of a virtual machine scale set.

    :ivar computer_name_prefix: Computer name prefix for all of the virtual machines in
     the scale set. Computer name prefixes must be 1 to 15 characters long.
    :vartype computer_name_prefix: str
    :ivar admin_username: Name of the administrator account. **Windows-only restriction:**
     cannot end in ".". **Disallowed values:** "administrator", "admin", "user", "user1",
     "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
     "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root",
     "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4",
     "user5". **Minimum-length (Linux):** 1 character. **Max-length (Linux):** 64
     characters. **Max-length (Windows):** 20 characters.
    :vartype admin_username: str
    :ivar admin_password: Password of the administrator account. **Minimum-length
     (Windows):** 8 characters. **Minimum-length (Linux):** 6 characters. **Max-length
     (Windows):** 123 characters. **Max-length (Linux):** 72 characters. **Complexity
     requirements:** 3 out of 4 conditions below need to be fulfilled: has lower
     characters, has upper characters, has a digit, has a special character (regex match
     ``[\\W_]``). **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd",
     "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
     "iloveyou!". For resetting the password, see `How to reset the Remote Desktop
     service or its login password in a Windows VM
     <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp>`_. For
     resetting root password, see `Manage users, SSH, and check or repair disks on Azure
     Linux VMs using the VMAccess Extension
     <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection>`_.
    :vartype admin_password: str
    :ivar custom_data: A base-64 encoded string of custom data. It is decoded to a binary
     array that is saved as a file on the Virtual Machine; the maximum length of the
     binary array is 65535 bytes. For using cloud-init for your VM, see `Using cloud-init
     to customize a Linux VM during creation
     <https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init>`_.
    :vartype custom_data: str
    :ivar windows_configuration: Windows operating system settings on the virtual machine.
    :vartype windows_configuration: ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
    :ivar linux_configuration: Linux operating system settings on the virtual machine. For
     a list of supported Linux distributions, see `Linux on Azure-Endorsed Distributions
     <https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros>`_.
    :vartype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
    :ivar secrets: Set of certificates that should be installed onto the virtual machines
     in the scale set. To install certificates on a virtual machine it is recommended to
     use the `Azure Key Vault virtual machine extension for Linux
     <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux>`_ or
     the `Azure Key Vault virtual machine extension for Windows
     <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows>`_.
    :vartype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
    :ivar allow_extension_operations: Whether extension operations are allowed on the
     virtual machine scale set. May only be set to False when no extensions are present
     on the scale set.
    :vartype allow_extension_operations: bool
    """

    _attribute_map = {
        "computer_name_prefix": {"key": "computerNamePrefix", "type": "str"},
        "admin_username": {"key": "adminUsername", "type": "str"},
        "admin_password": {"key": "adminPassword", "type": "str"},
        "custom_data": {"key": "customData", "type": "str"},
        "windows_configuration": {"key": "windowsConfiguration", "type": "WindowsConfiguration"},
        "linux_configuration": {"key": "linuxConfiguration", "type": "LinuxConfiguration"},
        "secrets": {"key": "secrets", "type": "[VaultSecretGroup]"},
        "allow_extension_operations": {"key": "allowExtensionOperations", "type": "bool"},
    }

    def __init__(
        self,
        *,
        computer_name_prefix: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword computer_name_prefix: Computer name prefix for all of the virtual
         machines in the scale set. Computer name prefixes must be 1 to 15 characters
         long.
        :paramtype computer_name_prefix: str
        :keyword admin_username: Name of the administrator account. **Windows-only
         restriction:** cannot end in ".". **Disallowed values:** "administrator",
         "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1",
         "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david",
         "guest", "john", "owner", "root", "server", "sql", "support",
         "support_388945a0", "sys", "test2", "test3", "user4", "user5".
         **Minimum-length (Linux):** 1 character. **Max-length (Linux):** 64 characters.
         **Max-length (Windows):** 20 characters.
        :paramtype admin_username: str
        :keyword admin_password: Password of the administrator account. **Minimum-length
         (Windows):** 8 characters. **Minimum-length (Linux):** 6 characters.
         **Max-length (Windows):** 123 characters. **Max-length (Linux):** 72
         characters. **Complexity requirements:** 3 out of 4 conditions below need to be
         fulfilled: has lower characters, has upper characters, has a digit, has a
         special character (regex match ``[\\W_]``). **Disallowed values:** "abc@123",
         "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!",
         "Password1", "Password22", "iloveyou!". For resetting the password, see `How to
         reset the Remote Desktop service or its login password in a Windows VM
         <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp>`_.
         For resetting root password, see `Manage users, SSH, and check or repair disks
         on Azure Linux VMs using the VMAccess Extension
         <https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection>`_.
        :paramtype admin_password: str
        :keyword custom_data: A base-64 encoded string of custom data. It is decoded to a
         binary array that is saved as a file on the Virtual Machine; the maximum length
         of the binary array is 65535 bytes. For using cloud-init for your VM, see
         `Using cloud-init to customize a Linux VM during creation
         <https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init>`_.
        :paramtype custom_data: str
        :keyword windows_configuration: Windows operating system settings on the virtual
         machine.
        :paramtype windows_configuration:
         ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
        :keyword linux_configuration: Linux operating system settings on the virtual
         machine. For a list of supported Linux distributions, see `Linux on
         Azure-Endorsed Distributions
         <https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros>`_.
        :paramtype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
        :keyword secrets: Set of certificates that should be installed onto the virtual
         machines in the scale set. To install certificates on a virtual machine it is
         recommended to use the `Azure Key Vault virtual machine extension for Linux
         <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux>`_
         or the `Azure Key Vault virtual machine extension for Windows
         <https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows>`_.
        :paramtype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
        :keyword allow_extension_operations: Whether extension operations are allowed on
         the virtual machine scale set. May only be set to False when no extensions are
         present on the scale set.
        :paramtype allow_extension_operations: bool
        """
        super().__init__(**kwargs)
        # All fields are optional pass-throughs; msrest serializes them per
        # _attribute_map (camelCase wire names).
        self.allow_extension_operations = allow_extension_operations
        self.secrets = secrets
        self.linux_configuration = linux_configuration
        self.windows_configuration = windows_configuration
        self.custom_data = custom_data
        self.admin_password = admin_password
        self.admin_username = admin_username
        self.computer_name_prefix = computer_name_prefix
class VirtualMachineScaleSetPublicIPAddressConfiguration(msrest.serialization.Model):
    """PublicIPAddress configuration for a virtual machine scale set IP configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. The publicIP address configuration name.
    :vartype name: str
    :ivar sku: Describes the public IP Sku. It can only be set with OrchestrationMode as Flexible.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSku
    :ivar idle_timeout_in_minutes: The idle timeout of the public IP address.
    :vartype idle_timeout_in_minutes: int
    :ivar dns_settings: The dns settings to be applied on the publicIP addresses.
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
    :ivar ip_tags: The list of IP tags associated with the public IP address.
    :vartype ip_tags: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIpTag]
    :ivar public_ip_prefix: The PublicIPPrefix from which to allocate publicIP addresses.
    :vartype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar public_ip_address_version: Whether the specific ipconfiguration is IPv4 or IPv6; default
     is IPv4. Available from Api-Version 2019-07-01 onwards. Possible values include: "IPv4",
     "IPv6".
    :vartype public_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersion
    :ivar delete_option: Specify what happens to the public IP when the VM is deleted. Possible
     values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    """

    # msrest serialization metadata: required fields and attribute -> JSON key/type mapping.
    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        sku: Optional["PublicIPAddressSku"] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        ip_tags: Optional[List["VirtualMachineScaleSetIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        public_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        """
        :keyword name: Required. The publicIP address configuration name.
        :paramtype name: str
        :keyword sku: Describes the public IP Sku. It can only be set with OrchestrationMode as
         Flexible.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.PublicIPAddressSku
        :keyword idle_timeout_in_minutes: The idle timeout of the public IP address.
        :paramtype idle_timeout_in_minutes: int
        :keyword dns_settings: The dns settings to be applied on the publicIP addresses.
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
        :keyword ip_tags: The list of IP tags associated with the public IP address.
        :paramtype ip_tags: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIpTag]
        :keyword public_ip_prefix: The PublicIPPrefix from which to allocate publicIP addresses.
        :paramtype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword public_ip_address_version: Whether the specific ipconfiguration is IPv4 or IPv6;
         default is IPv4. Available from Api-Version 2019-07-01 onwards. Possible values include:
         "IPv4", "IPv6".
        :paramtype public_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersion
        :keyword delete_option: Specify what happens to the public IP when the VM is deleted.
         Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        """
        super().__init__(**kwargs)
        self.name = name
        self.sku = sku
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
        self.ip_tags = ip_tags
        self.public_ip_prefix = public_ip_prefix
        self.public_ip_address_version = public_ip_address_version
        self.delete_option = delete_option
class VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings of a virtual machine scale set network configuration.

    All required parameters must be populated in order to send to Azure.

    :ivar domain_name_label: Required. The Domain name label. The concatenation of the domain name
     label and vm index will be the domain name labels of the PublicIPAddress resources that will
     be created.
    :vartype domain_name_label: str
    """

    # msrest serialization metadata.
    _validation = {
        'domain_name_label': {'required': True},
    }

    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        domain_name_label: str,
        **kwargs
    ):
        """
        :keyword domain_name_label: Required. The Domain name label. The concatenation of the
         domain name label and vm index will be the domain name labels of the PublicIPAddress
         resources that will be created.
        :paramtype domain_name_label: str
        """
        super().__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineScaleSetVMReimageParameters(VirtualMachineReimageParameters):
    """Reimage parameters for a single virtual machine scale set VM.

    :ivar temp_disk: Specifies whether to reimage temp disk. Default value: false. Note: This temp
     disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
    :vartype temp_disk: bool
    """

    # msrest serialization metadata.
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        temp_disk: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword temp_disk: Specifies whether to reimage temp disk. Default value: false. Note:
         This temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
        :paramtype temp_disk: bool
        """
        # The parent class owns the temp_disk attribute; forward it unchanged.
        super().__init__(temp_disk=temp_disk, **kwargs)
class VirtualMachineScaleSetReimageParameters(VirtualMachineScaleSetVMReimageParameters):
    """Reimage parameters for a virtual machine scale set, optionally scoped to specific instances.

    :ivar temp_disk: Specifies whether to reimage temp disk. Default value: false. Note: This temp
     disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
    :vartype temp_disk: bool
    :ivar instance_ids: The virtual machine scale set instance ids. Omitting the virtual machine
     scale set instance ids will result in the operation being performed on all virtual machines
     in the virtual machine scale set.
    :vartype instance_ids: list[str]
    """

    # msrest serialization metadata.
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        temp_disk: Optional[bool] = None,
        instance_ids: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword temp_disk: Specifies whether to reimage temp disk. Default value: false. Note:
         This temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
        :paramtype temp_disk: bool
        :keyword instance_ids: The virtual machine scale set instance ids. Omitting the virtual
         machine scale set instance ids will result in the operation being performed on all virtual
         machines in the virtual machine scale set.
        :paramtype instance_ids: list[str]
        """
        super().__init__(temp_disk=temp_disk, **kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetSku(msrest.serialization.Model):
    """An available virtual machine scale set sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar resource_type: The type of resource the sku applies to.
    :vartype resource_type: str
    :ivar sku: The Sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar capacity: Specifies the number of virtual machines in the scale set.
    :vartype capacity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetSkuCapacity
    """

    # Every field is server-populated, hence read-only for callers.
    _validation = {
        'resource_type': {'readonly': True},
        'sku': {'readonly': True},
        'capacity': {'readonly': True},
    }

    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'capacity': {'key': 'capacity', 'type': 'VirtualMachineScaleSetSkuCapacity'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super().__init__(**kwargs)
        # Read-only fields start empty; the service fills them in on responses.
        self.resource_type = None
        self.sku = None
        self.capacity = None
class VirtualMachineScaleSetSkuCapacity(msrest.serialization.Model):
    """Scaling information of a sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar minimum: The minimum capacity.
    :vartype minimum: long
    :ivar maximum: The maximum capacity that can be set.
    :vartype maximum: long
    :ivar default_capacity: The default capacity.
    :vartype default_capacity: long
    :ivar scale_type: The scale type applicable to the sku. Possible values include: "Automatic",
     "None".
    :vartype scale_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetSkuScaleType
    """

    # Every field is server-populated, hence read-only for callers.
    _validation = {
        'minimum': {'readonly': True},
        'maximum': {'readonly': True},
        'default_capacity': {'readonly': True},
        'scale_type': {'readonly': True},
    }

    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'long'},
        'maximum': {'key': 'maximum', 'type': 'long'},
        'default_capacity': {'key': 'defaultCapacity', 'type': 'long'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super().__init__(**kwargs)
        # Read-only fields start empty; the service fills them in on responses.
        self.minimum = None
        self.maximum = None
        self.default_capacity = None
        self.scale_type = None
class VirtualMachineScaleSetStorageProfile(msrest.serialization.Model):
    """Storage profile of a virtual machine scale set.

    :ivar image_reference: Specifies information about the image to use. You can specify
     information about platform images, marketplace images, or virtual machine images. This
     element is required when you want to use a platform image, marketplace image, or virtual
     machine image, but is not used in other creation operations.
    :vartype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
    :ivar os_disk: Specifies information about the operating system disk used by the virtual
     machines in the scale set. :code:`<br>`:code:`<br>` For more information about disks, see
     `About disks and VHDs for Azure virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
    :vartype os_disk: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetOSDisk
    :ivar data_disks: Specifies the parameters that are used to add data disks to the virtual
     machines in the scale set. :code:`<br>`:code:`<br>` For more information about disks, see
     `About disks and VHDs for Azure virtual machines
     <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
    :vartype data_disks:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetDataDisk]
    """

    # msrest serialization metadata.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        """
        :keyword image_reference: Specifies information about the image to use (platform,
         marketplace, or virtual machine image). Required when creating from an image; unused in
         other creation operations.
        :paramtype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
        :keyword os_disk: Specifies information about the operating system disk used by the
         virtual machines in the scale set. See `About disks and VHDs for Azure virtual machines
         <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
        :paramtype os_disk: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetOSDisk
        :keyword data_disks: Specifies the parameters that are used to add data disks to the
         virtual machines in the scale set. See `About disks and VHDs for Azure virtual machines
         <https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview>`_.
        :paramtype data_disks:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetDataDisk]
        """
        super().__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class VirtualMachineScaleSetUpdate(UpdateResource):
    """Update (PATCH) payload for a Virtual Machine Scale Set.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: The virtual machine scale set sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar plan: The purchase plan when deploying a virtual machine scale set from VM Marketplace
     images.
    :vartype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
    :ivar identity: The identity of the virtual machine scale set, if configured.
    :vartype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIdentity
    :ivar upgrade_policy: The upgrade policy.
    :vartype upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.UpgradePolicy
    :ivar automatic_repairs_policy: Policy for automatic repairs.
    :vartype automatic_repairs_policy:
     ~azure.mgmt.compute.v2022_03_01.models.AutomaticRepairsPolicy
    :ivar virtual_machine_profile: The virtual machine profile.
    :vartype virtual_machine_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateVMProfile
    :ivar overprovision: Specifies whether the Virtual Machine Scale Set should be
     overprovisioned.
    :vartype overprovision: bool
    :ivar do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled, extensions
     are launched only on the requested number of VMs which are finally kept. This property will
     hence ensure that the extensions do not run on the extra overprovisioned VMs.
    :vartype do_not_run_extensions_on_overprovisioned_v_ms: bool
    :ivar single_placement_group: When true this limits the scale set to a single placement group,
     of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to
     false. However, if singlePlacementGroup is false, it may not be modified to true.
    :vartype single_placement_group: bool
    :ivar additional_capabilities: Specifies additional capabilities enabled or disabled on the
     Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines
     have the capability to support attaching managed data disks with UltraSSD_LRS storage account
     type.
    :vartype additional_capabilities: ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
    :ivar scale_in_policy: Specifies the policies applied when scaling in Virtual Machines in the
     Virtual Machine Scale Set.
    :vartype scale_in_policy: ~azure.mgmt.compute.v2022_03_01.models.ScaleInPolicy
    :ivar proximity_placement_group: Specifies information about the proximity placement group
     that the virtual machine scale set should be assigned to. :code:`<br>`:code:`<br>`Minimum
     api-version: 2018-04-01.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """

    # msrest serialization metadata; nested "properties.*" keys flatten the ARM envelope.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetUpdateVMProfile'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetUpdateVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword sku: The virtual machine scale set sku.
        :paramtype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
        :keyword plan: The purchase plan when deploying a virtual machine scale set from VM
         Marketplace images.
        :paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
        :keyword identity: The identity of the virtual machine scale set, if configured.
        :paramtype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetIdentity
        :keyword upgrade_policy: The upgrade policy.
        :paramtype upgrade_policy: ~azure.mgmt.compute.v2022_03_01.models.UpgradePolicy
        :keyword automatic_repairs_policy: Policy for automatic repairs.
        :paramtype automatic_repairs_policy:
         ~azure.mgmt.compute.v2022_03_01.models.AutomaticRepairsPolicy
        :keyword virtual_machine_profile: The virtual machine profile.
        :paramtype virtual_machine_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateVMProfile
        :keyword overprovision: Specifies whether the Virtual Machine Scale Set should be
         overprovisioned.
        :paramtype overprovision: bool
        :keyword do_not_run_extensions_on_overprovisioned_v_ms: When Overprovision is enabled,
         extensions are launched only on the requested number of VMs which are finally kept,
         ensuring extensions do not run on the extra overprovisioned VMs.
        :paramtype do_not_run_extensions_on_overprovisioned_v_ms: bool
        :keyword single_placement_group: When true this limits the scale set to a single placement
         group, of max size 100 virtual machines. NOTE: may be changed from true to false, but not
         from false to true.
        :paramtype single_placement_group: bool
        :keyword additional_capabilities: Additional capabilities enabled or disabled on the
         Virtual Machines in the scale set, e.g. support for UltraSSD_LRS managed data disks.
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
        :keyword scale_in_policy: Specifies the policies applied when scaling in Virtual Machines
         in the Virtual Machine Scale Set.
        :paramtype scale_in_policy: ~azure.mgmt.compute.v2022_03_01.models.ScaleInPolicy
        :keyword proximity_placement_group: The proximity placement group the scale set should be
         assigned to. Minimum api-version: 2018-04-01.
        :paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        """
        # tags is handled by the UpdateResource base; everything else lives on this class.
        super().__init__(tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.single_placement_group = single_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.proximity_placement_group = proximity_placement_group
class VirtualMachineScaleSetUpdateIPConfiguration(SubResource):
    """Updatable IP configuration of a scale set network profile. NOTE: The subnet of a scale set may be modified as long as the original subnet and the new subnet are in the same virtual network.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: The IP configuration name.
    :vartype name: str
    :ivar subnet: The subnet.
    :vartype subnet: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    :ivar primary: Specifies the primary IP Configuration in case the network interface has more
     than one IP Configuration.
    :vartype primary: bool
    :ivar public_ip_address_configuration: The publicIPAddressConfiguration.
    :vartype public_ip_address_configuration:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
    :ivar private_ip_address_version: Whether the specific ipconfiguration is IPv4 or IPv6;
     default is IPv4. Available from Api-Version 2017-03-30 onwards. Possible values include:
     "IPv4", "IPv6".
    :vartype private_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersion
    :ivar application_gateway_backend_address_pools: The application gateway backend address
     pools.
    :vartype application_gateway_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar application_security_groups: Specifies an array of references to application security
     group.
    :vartype application_security_groups: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar load_balancer_backend_address_pools: The load balancer backend address pools.
    :vartype load_balancer_backend_address_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar load_balancer_inbound_nat_pools: The load balancer inbound nat pools.
    :vartype load_balancer_inbound_nat_pools:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    """

    # msrest serialization metadata; nested "properties.*" keys flatten the ARM envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetUpdatePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword name: The IP configuration name.
        :paramtype name: str
        :keyword subnet: The subnet.
        :paramtype subnet: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        :keyword primary: Specifies the primary IP Configuration in case the network interface has
         more than one IP Configuration.
        :paramtype primary: bool
        :keyword public_ip_address_configuration: The publicIPAddressConfiguration.
        :paramtype public_ip_address_configuration:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
        :keyword private_ip_address_version: Whether the specific ipconfiguration is IPv4 or IPv6;
         default is IPv4. Available from Api-Version 2017-03-30 onwards. Possible values include:
         "IPv4", "IPv6".
        :paramtype private_ip_address_version: str or ~azure.mgmt.compute.v2022_03_01.models.IPVersion
        :keyword application_gateway_backend_address_pools: The application gateway backend address
         pools.
        :paramtype application_gateway_backend_address_pools:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword application_security_groups: Specifies an array of references to application
         security group.
        :paramtype application_security_groups:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword load_balancer_backend_address_pools: The load balancer backend address pools.
        :paramtype load_balancer_backend_address_pools:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        :keyword load_balancer_inbound_nat_pools: The load balancer inbound nat pools.
        :paramtype load_balancer_inbound_nat_pools:
         list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
        """
        # `id` (keyword name fixed by the REST contract) belongs to the SubResource base.
        super().__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetUpdateNetworkConfiguration(SubResource):
    """Updatable network configuration of a scale set network profile.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: The network configuration name.
    :vartype name: str
    :ivar primary: Whether this is a primary NIC on a virtual machine.
    :vartype primary: bool
    :ivar enable_accelerated_networking: Specifies whether the network interface is accelerated
     networking-enabled.
    :vartype enable_accelerated_networking: bool
    :ivar enable_fpga: Specifies whether the network interface is FPGA networking-enabled.
    :vartype enable_fpga: bool
    :ivar network_security_group: The network security group.
    :vartype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar dns_settings: The dns settings to be applied on the network interfaces.
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
    :ivar ip_configurations: The virtual machine scale set IP Configuration.
    :vartype ip_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateIPConfiguration]
    :ivar enable_ip_forwarding: Whether IP forwarding enabled on this NIC.
    :vartype enable_ip_forwarding: bool
    :ivar delete_option: Specify what happens to the network interface when the VM is deleted.
     Possible values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    """

    # msrest serialization metadata; nested "properties.*" keys flatten the ARM envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_fpga': {'key': 'properties.enableFpga', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetUpdateIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetUpdateIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        """
        :keyword id: Resource Id.
        :paramtype id: str
        :keyword name: The network configuration name.
        :paramtype name: str
        :keyword primary: Whether this is a primary NIC on a virtual machine.
        :paramtype primary: bool
        :keyword enable_accelerated_networking: Specifies whether the network interface is
         accelerated networking-enabled.
        :paramtype enable_accelerated_networking: bool
        :keyword enable_fpga: Specifies whether the network interface is FPGA networking-enabled.
        :paramtype enable_fpga: bool
        :keyword network_security_group: The network security group.
        :paramtype network_security_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword dns_settings: The dns settings to be applied on the network interfaces.
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
        :keyword ip_configurations: The virtual machine scale set IP Configuration.
        :paramtype ip_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateIPConfiguration]
        :keyword enable_ip_forwarding: Whether IP forwarding enabled on this NIC.
        :paramtype enable_ip_forwarding: bool
        :keyword delete_option: Specify what happens to the network interface when the VM is
         deleted. Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        """
        # `id` (keyword name fixed by the REST contract) belongs to the SubResource base.
        super().__init__(id=id, **kwargs)
        self.name = name
        self.primary = primary
        self.enable_accelerated_networking = enable_accelerated_networking
        self.enable_fpga = enable_fpga
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.enable_ip_forwarding = enable_ip_forwarding
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateNetworkProfile(msrest.serialization.Model):
    """Network profile for a virtual machine scale set update operation.

    :ivar health_probe: Load balancer probe used to gauge the health of scale set instances,
     referenced in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
    :vartype health_probe: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
    :ivar network_interface_configurations: Network interface configurations for the scale set.
    :vartype network_interface_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateNetworkConfiguration]
    :ivar network_api_version: Microsoft.Network API version used when creating networking
     resources in the network interface configurations for a scale set with orchestration mode
     'Flexible'. Possible values include: "2020-11-01".
    :vartype network_api_version: str or ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
    """

    # Wire-format mapping consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetUpdateNetworkConfiguration]'},
        'network_api_version': {'key': 'networkApiVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetUpdateNetworkConfiguration"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        **kwargs
    ):
        """
        :keyword health_probe: Load balancer probe used to gauge the health of scale set
         instances (resource-id form; see the class documentation).
        :paramtype health_probe: ~azure.mgmt.compute.v2022_03_01.models.ApiEntityReference
        :keyword network_interface_configurations: Network interface configurations for the
         scale set.
        :paramtype network_interface_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateNetworkConfiguration]
        :keyword network_api_version: Microsoft.Network API version used when creating
         networking resources for 'Flexible' orchestration mode. Possible values include:
         "2020-11-01".
        :paramtype network_api_version: str or
         ~azure.mgmt.compute.v2022_03_01.models.NetworkApiVersion
        """
        super(VirtualMachineScaleSetUpdateNetworkProfile, self).__init__(**kwargs)
        # All fields are optional patch-style values; None means "leave unchanged".
        self.health_probe = health_probe
        self.network_interface_configurations = network_interface_configurations
        self.network_api_version = network_api_version
class VirtualMachineScaleSetUpdateOSDisk(msrest.serialization.Model):
    """Update object for a virtual machine scale set operating system disk.

    Use this type when patching the OS disk of an existing VMSS.

    :ivar caching: The caching type. Possible values include: "None", "ReadOnly", "ReadWrite".
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Whether writeAccelerator is enabled on the disk.
    :vartype write_accelerator_enabled: bool
    :ivar disk_size_gb: Size of the OS disk in gigabytes; overrides the size from the image.
     :code:`<br>`:code:`<br>` This value cannot be larger than 1023 GB.
    :vartype disk_size_gb: int
    :ivar image: Source user image VirtualHardDisk. It is copied before being attached to the
     virtual machine; if SourceImage is provided, the destination VirtualHardDisk must not exist.
    :vartype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar vhd_containers: The list of virtual hard disk container uris.
    :vartype vhd_containers: list[str]
    :ivar managed_disk: The managed disk parameters.
    :vartype managed_disk:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
    :ivar delete_option: Whether the OS disk is deleted or detached when a VMSS Flex VM is
     deleted (VMSS with Flexible OrchestrationMode only). :code:`<br>`:code:`<br>` **Delete**
     removes the OS disk with the VM; **Detach** retains it. :code:`<br>`:code:`<br>` The
     default is **Delete**. For an Ephemeral OS Disk the value is fixed to **Delete**.
     Possible values include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """

    # Wire-format mapping consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """
        :keyword caching: The caching type. Possible values include: "None", "ReadOnly",
         "ReadWrite".
        :paramtype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
        :keyword write_accelerator_enabled: Whether writeAccelerator is enabled on the disk.
        :paramtype write_accelerator_enabled: bool
        :keyword disk_size_gb: Size of the OS disk in gigabytes; overrides the size from the
         image. :code:`<br>`:code:`<br>` This value cannot be larger than 1023 GB.
        :paramtype disk_size_gb: int
        :keyword image: Source user image VirtualHardDisk (copied before attach; the
         destination VirtualHardDisk must not exist when SourceImage is provided).
        :paramtype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
        :keyword vhd_containers: The list of virtual hard disk container uris.
        :paramtype vhd_containers: list[str]
        :keyword managed_disk: The managed disk parameters.
        :paramtype managed_disk:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetManagedDiskParameters
        :keyword delete_option: **Delete** or **Detach** behavior for the OS disk on VMSS Flex
         VM deletion (default **Delete**; fixed to **Delete** for Ephemeral OS Disks).
         Possible values include: "Delete", "Detach".
        :paramtype delete_option: str or
         ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
        """
        super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs)
        # Patch semantics: unset (None) fields leave the corresponding setting untouched.
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.disk_size_gb = disk_size_gb
        self.image = image
        self.vhd_containers = vhd_containers
        self.managed_disk = managed_disk
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateOSProfile(msrest.serialization.Model):
    """OS profile for a virtual machine scale set update operation.

    :ivar custom_data: A base-64 encoded string of custom data.
    :vartype custom_data: str
    :ivar windows_configuration: The Windows Configuration of the OS profile.
    :vartype windows_configuration: ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
    :ivar linux_configuration: The Linux Configuration of the OS profile.
    :vartype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
    :ivar secrets: The List of certificates for addition to the VM.
    :vartype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
    """

    # Wire-format mapping consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }

    def __init__(
        self,
        *,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ):
        """
        :keyword custom_data: A base-64 encoded string of custom data.
        :paramtype custom_data: str
        :keyword windows_configuration: The Windows Configuration of the OS profile.
        :paramtype windows_configuration:
         ~azure.mgmt.compute.v2022_03_01.models.WindowsConfiguration
        :keyword linux_configuration: The Linux Configuration of the OS profile.
        :paramtype linux_configuration: ~azure.mgmt.compute.v2022_03_01.models.LinuxConfiguration
        :keyword secrets: The List of certificates for addition to the VM.
        :paramtype secrets: list[~azure.mgmt.compute.v2022_03_01.models.VaultSecretGroup]
        """
        super(VirtualMachineScaleSetUpdateOSProfile, self).__init__(**kwargs)
        # Patch semantics: unset (None) fields leave the corresponding setting untouched.
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
class VirtualMachineScaleSetUpdatePublicIPAddressConfiguration(msrest.serialization.Model):
    """PublicIPAddress configuration of a VMSS IP Configuration (update object).

    :ivar name: The publicIP address configuration name.
    :vartype name: str
    :ivar idle_timeout_in_minutes: The idle timeout of the public IP address.
    :vartype idle_timeout_in_minutes: int
    :ivar dns_settings: The dns settings to be applied on the publicIP addresses .
    :vartype dns_settings:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
    :ivar public_ip_prefix: The PublicIPPrefix from which to allocate publicIP addresses.
    :vartype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar delete_option: What happens to the public IP when the VM is deleted. Possible values
     include: "Delete", "Detach".
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
    """

    # Wire-format mapping; 'properties.*' keys are flattened into the nested
    # 'properties' object by the msrest serializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        """
        :keyword name: The publicIP address configuration name.
        :paramtype name: str
        :keyword idle_timeout_in_minutes: The idle timeout of the public IP address.
        :paramtype idle_timeout_in_minutes: int
        :keyword dns_settings: The dns settings to be applied on the publicIP addresses .
        :paramtype dns_settings:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
        :keyword public_ip_prefix: The PublicIPPrefix from which to allocate publicIP addresses.
        :paramtype public_ip_prefix: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword delete_option: What happens to the public IP when the VM is deleted. Possible
         values include: "Delete", "Detach".
        :paramtype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DeleteOptions
        """
        super(VirtualMachineScaleSetUpdatePublicIPAddressConfiguration, self).__init__(**kwargs)
        # Patch semantics: unset (None) fields leave the corresponding setting untouched.
        self.name = name
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
        self.public_ip_prefix = public_ip_prefix
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateStorageProfile(msrest.serialization.Model):
    """Storage profile for a virtual machine scale set update operation.

    :ivar image_reference: The image reference.
    :vartype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
    :ivar os_disk: The OS disk.
    :vartype os_disk: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateOSDisk
    :ivar data_disks: The data disks.
    :vartype data_disks:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetDataDisk]
    """

    # Wire-format mapping consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetUpdateOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetUpdateOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        """
        :keyword image_reference: The image reference.
        :paramtype image_reference: ~azure.mgmt.compute.v2022_03_01.models.ImageReference
        :keyword os_disk: The OS disk.
        :paramtype os_disk:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateOSDisk
        :keyword data_disks: The data disks.
        :paramtype data_disks:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetDataDisk]
        """
        super(VirtualMachineScaleSetUpdateStorageProfile, self).__init__(**kwargs)
        # Patch semantics: unset (None) fields leave the corresponding setting untouched.
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class VirtualMachineScaleSetUpdateVMProfile(msrest.serialization.Model):
    """Virtual machine profile for a virtual machine scale set update operation.

    :ivar os_profile: The virtual machine scale set OS profile.
    :vartype os_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateOSProfile
    :ivar storage_profile: The virtual machine scale set storage profile.
    :vartype storage_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateStorageProfile
    :ivar network_profile: The virtual machine scale set network profile.
    :vartype network_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateNetworkProfile
    :ivar security_profile: The virtual machine scale set Security profile.
    :vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
    :ivar diagnostics_profile: The virtual machine scale set diagnostics profile.
    :vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
    :ivar extension_profile: The virtual machine scale set extension profile.
    :vartype extension_profile:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtensionProfile
    :ivar license_type: The license type, for bring-your-own-license scenarios.
    :vartype license_type: str
    :ivar billing_profile: Billing related details of a Azure Spot VMSS.
     :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
    :vartype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
    :ivar scheduled_events_profile: Scheduled Event related configurations.
    :vartype scheduled_events_profile:
     ~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
    :ivar user_data: UserData for the VM, which must be base-64 encoded. Customer should not
     pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
    :vartype user_data: str
    """

    # Wire-format mapping consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetUpdateOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetUpdateStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetUpdateNetworkProfile'},
        'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
        'user_data': {'key': 'userData', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetUpdateOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetUpdateStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetUpdateNetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword os_profile: The virtual machine scale set OS profile.
        :paramtype os_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateOSProfile
        :keyword storage_profile: The virtual machine scale set storage profile.
        :paramtype storage_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateStorageProfile
        :keyword network_profile: The virtual machine scale set network profile.
        :paramtype network_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetUpdateNetworkProfile
        :keyword security_profile: The virtual machine scale set Security profile.
        :paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
        :keyword diagnostics_profile: The virtual machine scale set diagnostics profile.
        :paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
        :keyword extension_profile: The virtual machine scale set extension profile.
        :paramtype extension_profile:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtensionProfile
        :keyword license_type: The license type, for bring-your-own-license scenarios.
        :paramtype license_type: str
        :keyword billing_profile: Billing related details of a Azure Spot VMSS.
         :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
        :paramtype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
        :keyword scheduled_events_profile: Scheduled Event related configurations.
        :paramtype scheduled_events_profile:
         ~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
        :keyword user_data: UserData for the VM, which must be base-64 encoded. Customer should
         not pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
        :paramtype user_data: str
        """
        super(VirtualMachineScaleSetUpdateVMProfile, self).__init__(**kwargs)
        # Patch semantics: unset (None) fields leave the corresponding setting untouched.
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.license_type = license_type
        self.billing_profile = billing_profile
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
class VirtualMachineScaleSetVM(Resource):
    """Describes a single virtual machine inside a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar instance_id: The virtual machine instance ID.
    :vartype instance_id: str
    :ivar sku: The virtual machine SKU.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar plan: Marketplace image information used to create the virtual machine; only used for
     marketplace images. Before using a marketplace image from an API, enable it for
     programmatic use: in the Azure portal, find the image, click **Want to deploy
     programmatically, Get Started ->**, enter any required information and click **Save**.
    :vartype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
    :ivar resources: The virtual machine child extension resources.
    :vartype resources: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtension]
    :ivar zones: The virtual machine zones.
    :vartype zones: list[str]
    :ivar identity: The identity of the virtual machine, if configured.
    :vartype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
    :ivar latest_model_applied: Whether the latest scale set model has been applied to this
     virtual machine.
    :vartype latest_model_applied: bool
    :ivar vm_id: Azure VM unique ID.
    :vartype vm_id: str
    :ivar instance_view: The virtual machine instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMInstanceView
    :ivar hardware_profile: Hardware settings for the virtual machine.
    :vartype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
    :ivar storage_profile: Storage settings for the virtual machine disks.
    :vartype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
    :ivar additional_capabilities: Additional capabilities enabled or disabled on the virtual
     machine in the scale set, e.g. whether it can attach managed data disks with UltraSSD_LRS
     storage account type.
    :vartype additional_capabilities: ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
    :ivar os_profile: Operating system settings for the virtual machine.
    :vartype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
    :ivar security_profile: Security related profile settings for the virtual machine.
    :vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
    :ivar network_profile: Network interfaces of the virtual machine.
    :vartype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
    :ivar network_profile_configuration: Network profile configuration of the virtual machine.
    :vartype network_profile_configuration:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMNetworkProfileConfiguration
    :ivar diagnostics_profile: Boot diagnostic settings state.
     :code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
    :vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
    :ivar availability_set: Availability set the virtual machine should be assigned to. Virtual
     machines in the same availability set are allocated to different nodes to maximize
     availability. See `Availability sets overview
     <https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_ and
     `Maintenance and updates for Virtual Machines in Azure
     <https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_.
     :code:`<br>`:code:`<br>` A VM can only be added to an availability set at creation time;
     an existing VM cannot be added to one.
    :vartype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar license_type: Indicates the image or disk was licensed on-premises.
     :code:`<br>`:code:`<br>` Windows Server values: Windows_Client, Windows_Server.
     :code:`<br>`:code:`<br>` Linux Server values: RHEL_BYOS (for RHEL), SLES_BYOS (for SUSE).
     :code:`<br>`:code:`<br>` See `Azure Hybrid Use Benefit for Windows Server
     <https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
     and `Azure Hybrid Use Benefit for Linux Server
     <https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_.
     :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
    :vartype license_type: str
    :ivar model_definition_applied: Whether the model applied to the virtual machine is the
     scale set model or a customized model for the virtual machine.
    :vartype model_definition_applied: str
    :ivar protection_policy: The protection policy of the virtual machine.
    :vartype protection_policy:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMProtectionPolicy
    :ivar user_data: UserData for the VM, which must be base-64 encoded. Customer should not
     pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
    :vartype user_data: str
    """

    # Fields marked readonly are server-populated and rejected on input by msrest.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'instance_id': {'readonly': True},
        'sku': {'readonly': True},
        'resources': {'readonly': True},
        'zones': {'readonly': True},
        'latest_model_applied': {'readonly': True},
        'vm_id': {'readonly': True},
        'instance_view': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'model_definition_applied': {'readonly': True},
    }

    # Wire-format mapping; 'properties.*' keys are flattened into the nested
    # 'properties' object by the msrest serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineScaleSetVMInstanceView'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'network_profile_configuration': {'key': 'properties.networkProfileConfiguration', 'type': 'VirtualMachineScaleSetVMNetworkProfileConfiguration'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'model_definition_applied': {'key': 'properties.modelDefinitionApplied', 'type': 'str'},
        'protection_policy': {'key': 'properties.protectionPolicy', 'type': 'VirtualMachineScaleSetVMProtectionPolicy'},
        'user_data': {'key': 'properties.userData', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        network_profile_configuration: Optional["VirtualMachineScaleSetVMNetworkProfileConfiguration"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        protection_policy: Optional["VirtualMachineScaleSetVMProtectionPolicy"] = None,
        user_data: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword plan: Marketplace image information used to create the virtual machine; only
         used for marketplace images (the image must be enabled for programmatic use — see the
         class documentation).
        :paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
        :keyword identity: The identity of the virtual machine, if configured.
        :paramtype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
        :keyword hardware_profile: Hardware settings for the virtual machine.
        :paramtype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
        :keyword storage_profile: Storage settings for the virtual machine disks.
        :paramtype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
        :keyword additional_capabilities: Additional capabilities enabled or disabled on the
         virtual machine (e.g. UltraSSD_LRS managed data disk support).
        :paramtype additional_capabilities:
         ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
        :keyword os_profile: Operating system settings for the virtual machine.
        :paramtype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
        :keyword security_profile: Security related profile settings for the virtual machine.
        :paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
        :keyword network_profile: Network interfaces of the virtual machine.
        :paramtype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
        :keyword network_profile_configuration: Network profile configuration of the virtual
         machine.
        :paramtype network_profile_configuration:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMNetworkProfileConfiguration
        :keyword diagnostics_profile: Boot diagnostic settings state.
         :code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
        :paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
        :keyword availability_set: Availability set to assign the virtual machine to; only
         settable at VM creation time (see the class documentation for details and links).
        :paramtype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
        :keyword license_type: Indicates the image or disk was licensed on-premises
         (Windows_Client / Windows_Server / RHEL_BYOS / SLES_BYOS — see the class
         documentation). Minimum api-version: 2015-06-15.
        :paramtype license_type: str
        :keyword protection_policy: The protection policy of the virtual machine.
        :paramtype protection_policy:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMProtectionPolicy
        :keyword user_data: UserData for the VM, which must be base-64 encoded. Customer should
         not pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
        :paramtype user_data: str
        """
        super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags, **kwargs)
        # Server-populated (readonly) attributes start as None and are filled in
        # when a response is deserialized.
        self.instance_id = None
        self.sku = None
        self.resources = None
        self.zones = None
        self.latest_model_applied = None
        self.vm_id = None
        self.instance_view = None
        self.provisioning_state = None
        self.model_definition_applied = None
        # Client-settable attributes.
        self.plan = plan
        self.identity = identity
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.security_profile = security_profile
        self.network_profile = network_profile
        self.network_profile_configuration = network_profile_configuration
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.license_type = license_type
        self.protection_policy = protection_policy
        self.user_data = user_data
class VirtualMachineScaleSetVMExtension(SubResourceReadOnly):
    """Describes a VMSS VM Extension.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: The name of the extension.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: The name of the extension handler publisher.
    :vartype publisher: str
    :ivar type_properties_type: Specifies the type of the extension; an example is
     "CustomScriptExtension".
    :vartype type_properties_type: str
    :ivar type_handler_version: Specifies the version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Indicates whether the extension should use a newer minor
     version if one is available at deployment time. Once deployed, however, the extension will not
     upgrade minor versions unless redeployed, even with this property set to true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Indicates whether the extension should be automatically
     upgraded by the platform if there is a newer version of the extension available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can contain either protectedSettings or
     protectedSettingsFromKeyVault or no protected settings at all.
    :vartype protected_settings: any
    :ivar provisioning_state: The provisioning state, which only appears in the response.
    :vartype provisioning_state: str
    :ivar instance_view: The virtual machine extension instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView
    :ivar suppress_failures: Indicates whether failures stemming from the extension will be
     suppressed (Operational failures such as not connecting to the VM will not be suppressed
     regardless of this value). The default is false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: The extensions protected settings that are passed by
     reference, and consumed from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    # Fields flagged readonly are populated by the service and ignored when
    # this model is serialized into a request body.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps each Python attribute to its wire JSON key and serialization type;
    # dotted keys such as 'properties.forceUpdateTag' address nested JSON.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword force_update_tag: How the extension handler should be forced to update even if the
         extension configuration has not changed.
        :paramtype force_update_tag: str
        :keyword publisher: The name of the extension handler publisher.
        :paramtype publisher: str
        :keyword type_properties_type: Specifies the type of the extension; an example is
         "CustomScriptExtension".
        :paramtype type_properties_type: str
        :keyword type_handler_version: Specifies the version of the script handler.
        :paramtype type_handler_version: str
        :keyword auto_upgrade_minor_version: Indicates whether the extension should use a newer minor
         version if one is available at deployment time. Once deployed, however, the extension will not
         upgrade minor versions unless redeployed, even with this property set to true.
        :paramtype auto_upgrade_minor_version: bool
        :keyword enable_automatic_upgrade: Indicates whether the extension should be automatically
         upgraded by the platform if there is a newer version of the extension available.
        :paramtype enable_automatic_upgrade: bool
        :keyword settings: Json formatted public settings for the extension.
        :paramtype settings: any
        :keyword protected_settings: The extension can contain either protectedSettings or
         protectedSettingsFromKeyVault or no protected settings at all.
        :paramtype protected_settings: any
        :keyword instance_view: The virtual machine extension instance view.
        :paramtype instance_view:
         ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView
        :keyword suppress_failures: Indicates whether failures stemming from the extension will be
         suppressed (Operational failures such as not connecting to the VM will not be suppressed
         regardless of this value). The default is false.
        :paramtype suppress_failures: bool
        :keyword protected_settings_from_key_vault: The extensions protected settings that are passed
         by reference, and consumed from key vault.
        :paramtype protected_settings_from_key_vault: any
        """
        super(VirtualMachineScaleSetVMExtension, self).__init__(**kwargs)
        # Read-only attributes start as None and are filled in from the
        # service response during deserialization.
        self.name = None
        self.type = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        # Read-only: only appears in the response.
        self.provisioning_state = None
        self.instance_view = instance_view
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetVMExtensionsListResult(msrest.serialization.Model):
    """Response model for the List VMSS VM Extension operation.

    :ivar value: The list of VMSS VM extensions.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMExtension]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVMExtension]'},
    }

    def __init__(self, *, value: Optional[List["VirtualMachineScaleSetVMExtension"]] = None, **kwargs):
        """
        :keyword value: The list of VMSS VM extensions.
        :paramtype value:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVMExtension]
        """
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineScaleSetVMExtensionsSummary(msrest.serialization.Model):
    """Extensions summary for virtual machines of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The extension name.
    :vartype name: str
    :ivar statuses_summary: The extensions information.
    :vartype statuses_summary:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineStatusCodeCount]
    """

    # Every field on this model is server-populated, hence read-only.
    _validation = {
        'name': {'readonly': True},
        'statuses_summary': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }

    def __init__(self, **kwargs):
        """
        """
        super().__init__(**kwargs)
        # Filled in from the service response during deserialization.
        self.name = None
        self.statuses_summary = None
class VirtualMachineScaleSetVMExtensionUpdate(SubResourceReadOnly):
    """Describes a VMSS VM Extension.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: The name of the extension.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar force_update_tag: How the extension handler should be forced to update even if the
     extension configuration has not changed.
    :vartype force_update_tag: str
    :ivar publisher: The name of the extension handler publisher.
    :vartype publisher: str
    :ivar type_properties_type: Specifies the type of the extension; an example is
     "CustomScriptExtension".
    :vartype type_properties_type: str
    :ivar type_handler_version: Specifies the version of the script handler.
    :vartype type_handler_version: str
    :ivar auto_upgrade_minor_version: Indicates whether the extension should use a newer minor
     version if one is available at deployment time. Once deployed, however, the extension will not
     upgrade minor versions unless redeployed, even with this property set to true.
    :vartype auto_upgrade_minor_version: bool
    :ivar enable_automatic_upgrade: Indicates whether the extension should be automatically
     upgraded by the platform if there is a newer version of the extension available.
    :vartype enable_automatic_upgrade: bool
    :ivar settings: Json formatted public settings for the extension.
    :vartype settings: any
    :ivar protected_settings: The extension can contain either protectedSettings or
     protectedSettingsFromKeyVault or no protected settings at all.
    :vartype protected_settings: any
    :ivar suppress_failures: Indicates whether failures stemming from the extension will be
     suppressed (Operational failures such as not connecting to the VM will not be suppressed
     regardless of this value). The default is false.
    :vartype suppress_failures: bool
    :ivar protected_settings_from_key_vault: The extensions protected settings that are passed by
     reference, and consumed from key vault.
    :vartype protected_settings_from_key_vault: any
    """

    # Update variant of VirtualMachineScaleSetVMExtension: identical shape but
    # without the read-only provisioning_state / instance_view response fields.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps each Python attribute to its wire JSON key and serialization type;
    # dotted keys such as 'properties.forceUpdateTag' address nested JSON.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword force_update_tag: How the extension handler should be forced to update even if the
         extension configuration has not changed.
        :paramtype force_update_tag: str
        :keyword publisher: The name of the extension handler publisher.
        :paramtype publisher: str
        :keyword type_properties_type: Specifies the type of the extension; an example is
         "CustomScriptExtension".
        :paramtype type_properties_type: str
        :keyword type_handler_version: Specifies the version of the script handler.
        :paramtype type_handler_version: str
        :keyword auto_upgrade_minor_version: Indicates whether the extension should use a newer minor
         version if one is available at deployment time. Once deployed, however, the extension will not
         upgrade minor versions unless redeployed, even with this property set to true.
        :paramtype auto_upgrade_minor_version: bool
        :keyword enable_automatic_upgrade: Indicates whether the extension should be automatically
         upgraded by the platform if there is a newer version of the extension available.
        :paramtype enable_automatic_upgrade: bool
        :keyword settings: Json formatted public settings for the extension.
        :paramtype settings: any
        :keyword protected_settings: The extension can contain either protectedSettings or
         protectedSettingsFromKeyVault or no protected settings at all.
        :paramtype protected_settings: any
        :keyword suppress_failures: Indicates whether failures stemming from the extension will be
         suppressed (Operational failures such as not connecting to the VM will not be suppressed
         regardless of this value). The default is false.
        :paramtype suppress_failures: bool
        :keyword protected_settings_from_key_vault: The extensions protected settings that are passed
         by reference, and consumed from key vault.
        :paramtype protected_settings_from_key_vault: any
        """
        super(VirtualMachineScaleSetVMExtensionUpdate, self).__init__(**kwargs)
        # Read-only attributes start as None and are filled in from the
        # service response during deserialization.
        self.name = None
        self.type = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetVMInstanceIDs(msrest.serialization.Model):
    """Specifies a list of virtual machine instance IDs from the VM scale set.

    :ivar instance_ids: The virtual machine scale set instance ids. Omitting the virtual machine
     scale set instance ids will result in the operation being performed on all virtual machines in
     the virtual machine scale set.
    :vartype instance_ids: list[str]
    """

    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(self, *, instance_ids: Optional[List[str]] = None, **kwargs):
        """
        :keyword instance_ids: The virtual machine scale set instance ids. Omitting the virtual
         machine scale set instance ids will result in the operation being performed on all virtual
         machines in the virtual machine scale set.
        :paramtype instance_ids: list[str]
        """
        super().__init__(**kwargs)
        # None/omitted means "apply to every VM in the scale set".
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceRequiredIDs(msrest.serialization.Model):
    """Specifies a list of virtual machine instance IDs from the VM scale set.

    All required parameters must be populated in order to send to Azure.

    :ivar instance_ids: Required. The virtual machine scale set instance ids.
    :vartype instance_ids: list[str]
    """

    # Unlike VirtualMachineScaleSetVMInstanceIDs, the id list is mandatory here.
    _validation = {
        'instance_ids': {'required': True},
    }

    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(self, *, instance_ids: List[str], **kwargs):
        """
        :keyword instance_ids: Required. The virtual machine scale set instance ids.
        :paramtype instance_ids: list[str]
        """
        super().__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine scale set VM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar platform_update_domain: The Update Domain count.
    :vartype platform_update_domain: int
    :ivar platform_fault_domain: The Fault Domain count.
    :vartype platform_fault_domain: int
    :ivar rdp_thumb_print: The Remote desktop certificate thumbprint.
    :vartype rdp_thumb_print: str
    :ivar vm_agent: The VM Agent running on the virtual machine.
    :vartype vm_agent: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineAgentInstanceView
    :ivar maintenance_redeploy_status: The Maintenance Operation status on the virtual machine.
    :vartype maintenance_redeploy_status:
     ~azure.mgmt.compute.v2022_03_01.models.MaintenanceRedeployStatus
    :ivar disks: The disks information.
    :vartype disks: list[~azure.mgmt.compute.v2022_03_01.models.DiskInstanceView]
    :ivar extensions: The extensions information.
    :vartype extensions:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView]
    :ivar vm_health: The health status for the VM.
    :vartype vm_health: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineHealthStatus
    :ivar boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
     Console Output and Screenshot to diagnose VM status. :code:`<br>`:code:`<br>` You can easily
     view the output of your console log. :code:`<br>`:code:`<br>` Azure also enables you to see a
     screenshot of the VM from the hypervisor.
    :vartype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnosticsInstanceView
    :ivar statuses: The resource status information.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar assigned_host: Resource id of the dedicated host, on which the virtual machine is
     allocated through automatic placement, when the virtual machine is associated with a dedicated
     host group that has automatic placement enabled. :code:`<br>`:code:`<br>`Minimum api-version:
     2020-06-01.
    :vartype assigned_host: str
    :ivar placement_group_id: The placement group in which the VM is running. If the VM is
     deallocated it will not have a placementGroupId.
    :vartype placement_group_id: str
    """

    # Only these two fields are server-populated/read-only; the rest may be
    # supplied by the caller.
    _validation = {
        'vm_health': {'readonly': True},
        'assigned_host': {'readonly': True},
    }

    # Maps each Python attribute to its wire JSON key and serialization type.
    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'assigned_host': {'key': 'assignedHost', 'type': 'str'},
        'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        placement_group_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword platform_update_domain: The Update Domain count.
        :paramtype platform_update_domain: int
        :keyword platform_fault_domain: The Fault Domain count.
        :paramtype platform_fault_domain: int
        :keyword rdp_thumb_print: The Remote desktop certificate thumbprint.
        :paramtype rdp_thumb_print: str
        :keyword vm_agent: The VM Agent running on the virtual machine.
        :paramtype vm_agent: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineAgentInstanceView
        :keyword maintenance_redeploy_status: The Maintenance Operation status on the virtual machine.
        :paramtype maintenance_redeploy_status:
         ~azure.mgmt.compute.v2022_03_01.models.MaintenanceRedeployStatus
        :keyword disks: The disks information.
        :paramtype disks: list[~azure.mgmt.compute.v2022_03_01.models.DiskInstanceView]
        :keyword extensions: The extensions information.
        :paramtype extensions:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineExtensionInstanceView]
        :keyword boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
         Console Output and Screenshot to diagnose VM status. :code:`<br>`:code:`<br>` You can easily
         view the output of your console log. :code:`<br>`:code:`<br>` Azure also enables you to see a
         screenshot of the VM from the hypervisor.
        :paramtype boot_diagnostics: ~azure.mgmt.compute.v2022_03_01.models.BootDiagnosticsInstanceView
        :keyword statuses: The resource status information.
        :paramtype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
        :keyword placement_group_id: The placement group in which the VM is running. If the VM is
         deallocated it will not have a placementGroupId.
        :paramtype placement_group_id: str
        """
        super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        # Read-only: populated from the service response during deserialization.
        self.vm_health = None
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
        # Read-only: populated from the service response during deserialization.
        self.assigned_host = None
        self.placement_group_id = placement_group_id
class VirtualMachineScaleSetVMListResult(msrest.serialization.Model):
    """Paged response for the List Virtual Machine Scale Set VMs operation.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of virtual machine scale sets VMs.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVM]
    :ivar next_link: The uri to fetch the next page of Virtual Machine Scale Set VMs. Call
     ListNext() with this to fetch the next page of VMSS VMs.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVM]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineScaleSetVM"], next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: Required. The list of virtual machine scale sets VMs.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetVM]
        :keyword next_link: The uri to fetch the next page of Virtual Machine Scale Set VMs. Call
         ListNext() with this to fetch the next page of VMSS VMs.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        # next_link is absent (None) on the final page of results.
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetVMNetworkProfileConfiguration(msrest.serialization.Model):
    """Describes a virtual machine scale set VM network profile.

    :ivar network_interface_configurations: The list of network configurations.
    :vartype network_interface_configurations:
     list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
    """

    _attribute_map = {
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
    }

    def __init__(
        self,
        *,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        **kwargs
    ):
        """
        :keyword network_interface_configurations: The list of network configurations.
        :paramtype network_interface_configurations:
         list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkConfiguration]
        """
        super().__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetVMProfile(msrest.serialization.Model):
"""Describes a virtual machine scale set virtual machine profile.
:ivar os_profile: Specifies the operating system settings for the virtual machines in the scale
set.
:vartype os_profile: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetOSProfile
:ivar storage_profile: Specifies the storage settings for the virtual machine disks.
:vartype storage_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetStorageProfile
:ivar network_profile: Specifies properties of the network interfaces of the virtual machines
in the scale set.
:vartype network_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkProfile
:ivar security_profile: Specifies the Security related profile settings for the virtual
machines in the scale set.
:vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
:ivar diagnostics_profile: Specifies the boot diagnostic settings state.
:code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
:vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
:ivar extension_profile: Specifies a collection of settings for extensions installed on virtual
machines in the scale set.
:vartype extension_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtensionProfile
:ivar license_type: Specifies that the image or disk that is being used was licensed
on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
:code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
:code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
:code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
:code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
<https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
:code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
<https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
:code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
:vartype license_type: str
:ivar priority: Specifies the priority for the virtual machines in the scale set.
:code:`<br>`:code:`<br>`Minimum api-version: 2017-10-30-preview. Possible values include:
"Regular", "Low", "Spot".
:vartype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
:ivar eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
:code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
"Delete".
:vartype eviction_policy: str or
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
:ivar billing_profile: Specifies the billing related details of a Azure Spot VMSS.
:code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
:vartype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
:ivar scheduled_events_profile: Specifies Scheduled Event related configurations.
:vartype scheduled_events_profile:
~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
:ivar user_data: UserData for the virtual machines in the scale set, which must be base-64
encoded. Customer should not pass any secrets in here. :code:`<br>`:code:`<br>`Minimum
api-version: 2021-03-01.
:vartype user_data: str
:ivar capacity_reservation: Specifies the capacity reservation related details of a scale set.
:code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
:vartype capacity_reservation:
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
:ivar application_profile: Specifies the gallery applications that should be made available to
the VM/VMSS.
:vartype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
:ivar hardware_profile: Specifies the hardware profile related details of a scale set.
:code:`<br>`:code:`<br>`Minimum api-version: 2022-03-01.
:vartype hardware_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetHardwareProfile
"""
_attribute_map = {
'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'str'},
'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
'user_data': {'key': 'userData', 'type': 'str'},
'capacity_reservation': {'key': 'capacityReservation', 'type': 'CapacityReservationProfile'},
'application_profile': {'key': 'applicationProfile', 'type': 'ApplicationProfile'},
'hardware_profile': {'key': 'hardwareProfile', 'type': 'VirtualMachineScaleSetHardwareProfile'},
}
def __init__(
self,
*,
os_profile: Optional["VirtualMachineScaleSetOSProfile"] = None,
storage_profile: Optional["VirtualMachineScaleSetStorageProfile"] = None,
network_profile: Optional["VirtualMachineScaleSetNetworkProfile"] = None,
security_profile: Optional["SecurityProfile"] = None,
diagnostics_profile: Optional["DiagnosticsProfile"] = None,
extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
license_type: Optional[str] = None,
priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
billing_profile: Optional["BillingProfile"] = None,
scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
user_data: Optional[str] = None,
capacity_reservation: Optional["CapacityReservationProfile"] = None,
application_profile: Optional["ApplicationProfile"] = None,
hardware_profile: Optional["VirtualMachineScaleSetHardwareProfile"] = None,
**kwargs
):
"""
:keyword os_profile: Specifies the operating system settings for the virtual machines in the
scale set.
:paramtype os_profile: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetOSProfile
:keyword storage_profile: Specifies the storage settings for the virtual machine disks.
:paramtype storage_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetStorageProfile
:keyword network_profile: Specifies properties of the network interfaces of the virtual
machines in the scale set.
:paramtype network_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetNetworkProfile
:keyword security_profile: Specifies the Security related profile settings for the virtual
machines in the scale set.
:paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
:keyword diagnostics_profile: Specifies the boot diagnostic settings state.
:code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
:paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
:keyword extension_profile: Specifies a collection of settings for extensions installed on
virtual machines in the scale set.
:paramtype extension_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetExtensionProfile
:keyword license_type: Specifies that the image or disk that is being used was licensed
on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
:code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
:code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
:code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
:code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
<https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
:code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
<https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
:code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
:paramtype license_type: str
:keyword priority: Specifies the priority for the virtual machines in the scale set.
:code:`<br>`:code:`<br>`Minimum api-version: 2017-10-30-preview. Possible values include:
"Regular", "Low", "Spot".
:paramtype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
:keyword eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
:code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
"Delete".
:paramtype eviction_policy: str or
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
:keyword billing_profile: Specifies the billing related details of a Azure Spot VMSS.
:code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
:paramtype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
:keyword scheduled_events_profile: Specifies Scheduled Event related configurations.
:paramtype scheduled_events_profile:
~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
:keyword user_data: UserData for the virtual machines in the scale set, which must be base-64
encoded. Customer should not pass any secrets in here. :code:`<br>`:code:`<br>`Minimum
api-version: 2021-03-01.
:paramtype user_data: str
:keyword capacity_reservation: Specifies the capacity reservation related details of a scale
set. :code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
:paramtype capacity_reservation:
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
:keyword application_profile: Specifies the gallery applications that should be made available
to the VM/VMSS.
:paramtype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
:keyword hardware_profile: Specifies the hardware profile related details of a scale set.
:code:`<br>`:code:`<br>`Minimum api-version: 2022-03-01.
:paramtype hardware_profile:
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineScaleSetHardwareProfile
"""
super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
self.os_profile = os_profile
self.storage_profile = storage_profile
self.network_profile = network_profile
self.security_profile = security_profile
self.diagnostics_profile = diagnostics_profile
self.extension_profile = extension_profile
self.license_type = license_type
self.priority = priority
self.eviction_policy = eviction_policy
self.billing_profile = billing_profile
self.scheduled_events_profile = scheduled_events_profile
self.user_data = user_data
self.capacity_reservation = capacity_reservation
self.application_profile = application_profile
self.hardware_profile = hardware_profile
class VirtualMachineScaleSetVMProtectionPolicy(msrest.serialization.Model):
    """Protection settings for an individual VM inside a virtual machine scale set.

    :ivar protect_from_scale_in: Indicates that this scale set VM shouldn't be considered for
     deletion during a scale-in operation.
    :vartype protect_from_scale_in: bool
    :ivar protect_from_scale_set_actions: Indicates that model updates or actions (including
     scale-in) initiated on the scale set should not be applied to this scale set VM.
    :vartype protect_from_scale_set_actions: bool
    """

    # Wire-name / serialization-type mapping consumed by msrest.
    _attribute_map = {
        'protect_from_scale_in': {'key': 'protectFromScaleIn', 'type': 'bool'},
        'protect_from_scale_set_actions': {'key': 'protectFromScaleSetActions', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        protect_from_scale_in: Optional[bool] = None,
        protect_from_scale_set_actions: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword protect_from_scale_in: Exclude this scale set VM from deletion during scale-in
         operations.
        :paramtype protect_from_scale_in: bool
        :keyword protect_from_scale_set_actions: Do not apply scale set model updates or actions
         (including scale-in) to this scale set VM.
        :paramtype protect_from_scale_set_actions: bool
        """
        super(VirtualMachineScaleSetVMProtectionPolicy, self).__init__(**kwargs)
        # Independent flags; order of assignment is irrelevant.
        self.protect_from_scale_set_actions = protect_from_scale_set_actions
        self.protect_from_scale_in = protect_from_scale_in
class VirtualMachineSize(msrest.serialization.Model):
    """Describes the properties of a VM size.

    :ivar name: The name of the virtual machine size.
    :vartype name: str
    :ivar number_of_cores: The number of cores supported by the size. For Constrained vCPU capable
     VM sizes this is the total vCPU quota the VM uses; for the accurate vCPU count refer to
     https://docs.microsoft.com/azure/virtual-machines/constrained-vcpu or
     https://docs.microsoft.com/rest/api/compute/resourceskus/list.
    :vartype number_of_cores: int
    :ivar os_disk_size_in_mb: The OS disk size, in MB, allowed by the size.
    :vartype os_disk_size_in_mb: int
    :ivar resource_disk_size_in_mb: The resource disk size, in MB, allowed by the size.
    :vartype resource_disk_size_in_mb: int
    :ivar memory_in_mb: The amount of memory, in MB, supported by the size.
    :vartype memory_in_mb: int
    :ivar max_data_disk_count: The maximum number of data disks that can be attached to the size.
    :vartype max_data_disk_count: int
    """

    # Wire-name / serialization-type mapping consumed by msrest.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'number_of_cores': {'key': 'numberOfCores', 'type': 'int'},
        'os_disk_size_in_mb': {'key': 'osDiskSizeInMB', 'type': 'int'},
        'resource_disk_size_in_mb': {'key': 'resourceDiskSizeInMB', 'type': 'int'},
        'memory_in_mb': {'key': 'memoryInMB', 'type': 'int'},
        'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        number_of_cores: Optional[int] = None,
        os_disk_size_in_mb: Optional[int] = None,
        resource_disk_size_in_mb: Optional[int] = None,
        memory_in_mb: Optional[int] = None,
        max_data_disk_count: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword name: The name of the virtual machine size.
        :paramtype name: str
        :keyword number_of_cores: The number of cores supported by the size. For Constrained vCPU
         capable VM sizes this is the total vCPU quota the VM uses; for the accurate vCPU count
         refer to https://docs.microsoft.com/azure/virtual-machines/constrained-vcpu or
         https://docs.microsoft.com/rest/api/compute/resourceskus/list.
        :paramtype number_of_cores: int
        :keyword os_disk_size_in_mb: The OS disk size, in MB, allowed by the size.
        :paramtype os_disk_size_in_mb: int
        :keyword resource_disk_size_in_mb: The resource disk size, in MB, allowed by the size.
        :paramtype resource_disk_size_in_mb: int
        :keyword memory_in_mb: The amount of memory, in MB, supported by the size.
        :paramtype memory_in_mb: int
        :keyword max_data_disk_count: The maximum number of data disks that can be attached to the
         size.
        :paramtype max_data_disk_count: int
        """
        super(VirtualMachineSize, self).__init__(**kwargs)
        # Store every keyword as a plain attribute of the same name.
        for attr_name, attr_value in (
            ('name', name),
            ('number_of_cores', number_of_cores),
            ('os_disk_size_in_mb', os_disk_size_in_mb),
            ('resource_disk_size_in_mb', resource_disk_size_in_mb),
            ('memory_in_mb', memory_in_mb),
            ('max_data_disk_count', max_data_disk_count),
        ):
            setattr(self, attr_name, attr_value)
class VirtualMachineSizeListResult(msrest.serialization.Model):
    """Response body of the List Virtual Machine Sizes operation.

    :ivar value: The list of virtual machine sizes.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSize]
    """

    # Wire-name / serialization-type mapping consumed by msrest.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineSize]'},
    }

    def __init__(self, *, value: Optional[List["VirtualMachineSize"]] = None, **kwargs):
        """
        :keyword value: The list of virtual machine sizes.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSize]
        """
        super(VirtualMachineSizeListResult, self).__init__(**kwargs)
        self.value = value
class VirtualMachineSoftwarePatchProperties(msrest.serialization.Model):
    """Describes the properties of a Virtual Machine software patch.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar patch_id: A unique identifier for the patch.
    :vartype patch_id: str
    :ivar name: The friendly name of the patch.
    :vartype name: str
    :ivar version: The version number of the patch. This property applies only to Linux patches.
    :vartype version: str
    :ivar kb_id: The KBID of the patch. Only applies to Windows patches.
    :vartype kb_id: str
    :ivar classifications: The classification(s) of the patch as provided by the patch publisher.
    :vartype classifications: list[str]
    :ivar reboot_behavior: Describes the reboot requirements of the patch. Possible values include:
     "Unknown", "NeverReboots", "AlwaysRequiresReboot", "CanRequestReboot".
    :vartype reboot_behavior: str or
     ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchRebootBehavior
    :ivar activity_id: The activity ID of the operation that produced this result, used to
     correlate across CRP and extension logs.
    :vartype activity_id: str
    :ivar published_date: The UTC timestamp when the repository published this patch.
    :vartype published_date: ~datetime.datetime
    :ivar last_modified_date_time: The UTC timestamp of the last update to this patch record.
    :vartype last_modified_date_time: ~datetime.datetime
    :ivar assessment_state: Describes the availability of a given patch. Possible values include:
     "Unknown", "Available".
    :vartype assessment_state: str or ~azure.mgmt.compute.v2022_03_01.models.PatchAssessmentState
    """

    # Every attribute is server-populated; msrest refuses to serialize
    # readonly fields on outgoing requests.
    _validation = {
        'patch_id': {'readonly': True},
        'name': {'readonly': True},
        'version': {'readonly': True},
        'kb_id': {'readonly': True},
        'classifications': {'readonly': True},
        'reboot_behavior': {'readonly': True},
        'activity_id': {'readonly': True},
        'published_date': {'readonly': True},
        'last_modified_date_time': {'readonly': True},
        'assessment_state': {'readonly': True},
    }

    # Wire-name / serialization-type mapping consumed by msrest.
    _attribute_map = {
        'patch_id': {'key': 'patchId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'kb_id': {'key': 'kbId', 'type': 'str'},
        'classifications': {'key': 'classifications', 'type': '[str]'},
        'reboot_behavior': {'key': 'rebootBehavior', 'type': 'str'},
        'activity_id': {'key': 'activityId', 'type': 'str'},
        'published_date': {'key': 'publishedDate', 'type': 'iso-8601'},
        'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
        'assessment_state': {'key': 'assessmentState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        """
        super(VirtualMachineSoftwarePatchProperties, self).__init__(**kwargs)
        # All attributes are readonly and start unset; the service fills them in.
        for readonly_attr in (
            'patch_id', 'name', 'version', 'kb_id', 'classifications',
            'reboot_behavior', 'activity_id', 'published_date',
            'last_modified_date_time', 'assessment_state',
        ):
            setattr(self, readonly_attr, None)
class VirtualMachineStatusCodeCount(msrest.serialization.Model):
    """Status code and instance count entry of the scale set instance view status summary.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The instance view status code.
    :vartype code: str
    :ivar count: The number of instances having a particular status code.
    :vartype count: int
    """

    # Both fields are server-populated; msrest skips readonly fields on
    # outgoing requests.
    _validation = {
        'code': {'readonly': True},
        'count': {'readonly': True},
    }

    # Wire-name / serialization-type mapping consumed by msrest.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        """
        """
        super(VirtualMachineStatusCodeCount, self).__init__(**kwargs)
        # Readonly attributes start unset; the service fills them in.
        self.count = None
        self.code = None
class VirtualMachineUpdate(UpdateResource):
"""Describes a Virtual Machine Update.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar plan: Specifies information about the marketplace image used to create the virtual
machine. This element is only used for marketplace images. Before you can use a marketplace
image from an API, you must enable the image for programmatic use. In the Azure portal, find
the marketplace image that you want to use and then click **Want to deploy programmatically,
Get Started ->**. Enter any required information and then click **Save**.
:vartype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
:ivar identity: The identity of the virtual machine, if configured.
:vartype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
:ivar zones: The virtual machine zones.
:vartype zones: list[str]
:ivar hardware_profile: Specifies the hardware settings for the virtual machine.
:vartype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
:ivar storage_profile: Specifies the storage settings for the virtual machine disks.
:vartype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
:ivar additional_capabilities: Specifies additional capabilities enabled or disabled on the
virtual machine.
:vartype additional_capabilities: ~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
:ivar os_profile: Specifies the operating system settings used while creating the virtual
machine. Some of the settings cannot be changed once VM is provisioned.
:vartype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
:ivar network_profile: Specifies the network interfaces of the virtual machine.
:vartype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
:ivar security_profile: Specifies the Security related profile settings for the virtual
machine.
:vartype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
:ivar diagnostics_profile: Specifies the boot diagnostic settings state.
:code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
:vartype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
:ivar availability_set: Specifies information about the availability set that the virtual
machine should be assigned to. Virtual machines specified in the same availability set are
allocated to different nodes to maximize availability. For more information about availability
sets, see `Availability sets overview
<https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_.
:code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Maintenance
and updates for Virtual Machines in Azure
<https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_
:code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation
time. The availability set to which the VM is being added should be under the same resource
group as the availability set resource. An existing VM cannot be added to an availability set.
:code:`<br>`:code:`<br>`This property cannot exist along with a non-null
properties.virtualMachineScaleSet reference.
:vartype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar virtual_machine_scale_set: Specifies information about the virtual machine scale set that
the virtual machine should be assigned to. Virtual machines specified in the same virtual
machine scale set are allocated to different nodes to maximize availability. Currently, a VM
can only be added to virtual machine scale set at creation time. An existing VM cannot be added
to a virtual machine scale set. :code:`<br>`:code:`<br>`This property cannot exist along with a
non-null properties.availabilitySet reference. :code:`<br>`:code:`<br>`Minimum api‐version:
2019‐03‐01.
:vartype virtual_machine_scale_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar proximity_placement_group: Specifies information about the proximity placement group that
the virtual machine should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
2018-04-01.
:vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar priority: Specifies the priority for the virtual machine. :code:`<br>`:code:`<br>`Minimum
api-version: 2019-03-01. Possible values include: "Regular", "Low", "Spot".
:vartype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
:ivar eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
:code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
"Delete".
:vartype eviction_policy: str or
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
:ivar billing_profile: Specifies the billing related details of a Azure Spot virtual machine.
:code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
:vartype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
:ivar host: Specifies information about the dedicated host that the virtual machine resides in.
:code:`<br>`:code:`<br>`Minimum api-version: 2018-10-01.
:vartype host: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar host_group: Specifies information about the dedicated host group that the virtual machine
resides in. :code:`<br>`:code:`<br>`Minimum api-version: 2020-06-01.
:code:`<br>`:code:`<br>`NOTE: User cannot specify both host and hostGroup properties.
:vartype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineInstanceView
:ivar license_type: Specifies that the image or disk that is being used was licensed
on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
:code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
:code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
:code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
:code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
<https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
:code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
<https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
:code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
:vartype license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier that is encoded and
stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands.
:vartype vm_id: str
:ivar extensions_time_budget: Specifies the time alloted for all extensions to start. The time
duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in
ISO 8601 format. The default value is 90 minutes (PT1H30M). :code:`<br>`:code:`<br>` Minimum
api-version: 2020-06-01.
:vartype extensions_time_budget: str
:ivar platform_fault_domain: Specifies the scale set logical fault domain into which the
Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned
to a fault domain that best maintains balance across available fault
domains.:code:`<br>`:code:`<li>`This is applicable only if the 'virtualMachineScaleSet'
property of this Virtual Machine is set.:code:`<li>`The Virtual Machine Scale Set that is
referenced, must have 'platformFaultDomainCount' > 1.:code:`<li>`This property cannot be
updated once the Virtual Machine is created.:code:`<li>`Fault domain assignment can be viewed
in the Virtual Machine Instance View.:code:`<br>`:code:`<br>`Minimum api‐version: 2020‐12‐01.
:vartype platform_fault_domain: int
:ivar scheduled_events_profile: Specifies Scheduled Event related configurations.
:vartype scheduled_events_profile:
~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
:ivar user_data: UserData for the VM, which must be base-64 encoded. Customer should not pass
any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
:vartype user_data: str
:ivar capacity_reservation: Specifies information about the capacity reservation that is used
to allocate virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
:vartype capacity_reservation:
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
:ivar application_profile: Specifies the gallery applications that should be made available to
the VM/VMSS.
:vartype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
:ivar time_created: Specifies the time at which the Virtual Machine resource was
created.:code:`<br>`:code:`<br>`Minimum api-version: 2022-03-01.
:vartype time_created: ~datetime.datetime
"""
_validation = {
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
'time_created': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
'priority': {'key': 'properties.priority', 'type': 'str'},
'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
'host': {'key': 'properties.host', 'type': 'SubResource'},
'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'extensions_time_budget': {'key': 'properties.extensionsTimeBudget', 'type': 'str'},
'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
'scheduled_events_profile': {'key': 'properties.scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
'user_data': {'key': 'properties.userData', 'type': 'str'},
'capacity_reservation': {'key': 'properties.capacityReservation', 'type': 'CapacityReservationProfile'},
'application_profile': {'key': 'properties.applicationProfile', 'type': 'ApplicationProfile'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
plan: Optional["Plan"] = None,
identity: Optional["VirtualMachineIdentity"] = None,
zones: Optional[List[str]] = None,
hardware_profile: Optional["HardwareProfile"] = None,
storage_profile: Optional["StorageProfile"] = None,
additional_capabilities: Optional["AdditionalCapabilities"] = None,
os_profile: Optional["OSProfile"] = None,
network_profile: Optional["NetworkProfile"] = None,
security_profile: Optional["SecurityProfile"] = None,
diagnostics_profile: Optional["DiagnosticsProfile"] = None,
availability_set: Optional["SubResource"] = None,
virtual_machine_scale_set: Optional["SubResource"] = None,
proximity_placement_group: Optional["SubResource"] = None,
priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
billing_profile: Optional["BillingProfile"] = None,
host: Optional["SubResource"] = None,
host_group: Optional["SubResource"] = None,
license_type: Optional[str] = None,
extensions_time_budget: Optional[str] = None,
platform_fault_domain: Optional[int] = None,
scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
user_data: Optional[str] = None,
capacity_reservation: Optional["CapacityReservationProfile"] = None,
application_profile: Optional["ApplicationProfile"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword plan: Specifies information about the marketplace image used to create the virtual
machine. This element is only used for marketplace images. Before you can use a marketplace
image from an API, you must enable the image for programmatic use. In the Azure portal, find
the marketplace image that you want to use and then click **Want to deploy programmatically,
Get Started ->**. Enter any required information and then click **Save**.
:paramtype plan: ~azure.mgmt.compute.v2022_03_01.models.Plan
:keyword identity: The identity of the virtual machine, if configured.
:paramtype identity: ~azure.mgmt.compute.v2022_03_01.models.VirtualMachineIdentity
:keyword zones: The virtual machine zones.
:paramtype zones: list[str]
:keyword hardware_profile: Specifies the hardware settings for the virtual machine.
:paramtype hardware_profile: ~azure.mgmt.compute.v2022_03_01.models.HardwareProfile
:keyword storage_profile: Specifies the storage settings for the virtual machine disks.
:paramtype storage_profile: ~azure.mgmt.compute.v2022_03_01.models.StorageProfile
:keyword additional_capabilities: Specifies additional capabilities enabled or disabled on the
virtual machine.
:paramtype additional_capabilities:
~azure.mgmt.compute.v2022_03_01.models.AdditionalCapabilities
:keyword os_profile: Specifies the operating system settings used while creating the virtual
machine. Some of the settings cannot be changed once VM is provisioned.
:paramtype os_profile: ~azure.mgmt.compute.v2022_03_01.models.OSProfile
:keyword network_profile: Specifies the network interfaces of the virtual machine.
:paramtype network_profile: ~azure.mgmt.compute.v2022_03_01.models.NetworkProfile
:keyword security_profile: Specifies the Security related profile settings for the virtual
machine.
:paramtype security_profile: ~azure.mgmt.compute.v2022_03_01.models.SecurityProfile
:keyword diagnostics_profile: Specifies the boot diagnostic settings state.
:code:`<br>`:code:`<br>`Minimum api-version: 2015-06-15.
:paramtype diagnostics_profile: ~azure.mgmt.compute.v2022_03_01.models.DiagnosticsProfile
:keyword availability_set: Specifies information about the availability set that the virtual
machine should be assigned to. Virtual machines specified in the same availability set are
allocated to different nodes to maximize availability. For more information about availability
sets, see `Availability sets overview
<https://docs.microsoft.com/azure/virtual-machines/availability-set-overview>`_.
:code:`<br>`:code:`<br>` For more information on Azure planned maintenance, see `Maintenance
and updates for Virtual Machines in Azure
<https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates>`_
:code:`<br>`:code:`<br>` Currently, a VM can only be added to availability set at creation
time. The availability set to which the VM is being added should be under the same resource
group as the availability set resource. An existing VM cannot be added to an availability set.
:code:`<br>`:code:`<br>`This property cannot exist along with a non-null
properties.virtualMachineScaleSet reference.
:paramtype availability_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:keyword virtual_machine_scale_set: Specifies information about the virtual machine scale set
that the virtual machine should be assigned to. Virtual machines specified in the same virtual
machine scale set are allocated to different nodes to maximize availability. Currently, a VM
can only be added to virtual machine scale set at creation time. An existing VM cannot be added
to a virtual machine scale set. :code:`<br>`:code:`<br>`This property cannot exist along with a
non-null properties.availabilitySet reference. :code:`<br>`:code:`<br>`Minimum api‐version:
2019‐03‐01.
:paramtype virtual_machine_scale_set: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:keyword proximity_placement_group: Specifies information about the proximity placement group
that the virtual machine should be assigned to. :code:`<br>`:code:`<br>`Minimum api-version:
2018-04-01.
:paramtype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:keyword priority: Specifies the priority for the virtual machine.
:code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01. Possible values include: "Regular",
"Low", "Spot".
:paramtype priority: str or ~azure.mgmt.compute.v2022_03_01.models.VirtualMachinePriorityTypes
:keyword eviction_policy: Specifies the eviction policy for the Azure Spot virtual machine and
Azure Spot scale set. :code:`<br>`:code:`<br>`For Azure Spot virtual machines, both
'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
:code:`<br>`:code:`<br>`For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported
and the minimum api-version is 2017-10-30-preview. Possible values include: "Deallocate",
"Delete".
:paramtype eviction_policy: str or
~azure.mgmt.compute.v2022_03_01.models.VirtualMachineEvictionPolicyTypes
:keyword billing_profile: Specifies the billing related details of a Azure Spot virtual
machine. :code:`<br>`:code:`<br>`Minimum api-version: 2019-03-01.
:paramtype billing_profile: ~azure.mgmt.compute.v2022_03_01.models.BillingProfile
:keyword host: Specifies information about the dedicated host that the virtual machine resides
in. :code:`<br>`:code:`<br>`Minimum api-version: 2018-10-01.
:paramtype host: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:keyword host_group: Specifies information about the dedicated host group that the virtual
machine resides in. :code:`<br>`:code:`<br>`Minimum api-version: 2020-06-01.
:code:`<br>`:code:`<br>`NOTE: User cannot specify both host and hostGroup properties.
:paramtype host_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
:keyword license_type: Specifies that the image or disk that is being used was licensed
on-premises. :code:`<br>`:code:`<br>` Possible values for Windows Server operating system are:
:code:`<br>`:code:`<br>` Windows_Client :code:`<br>`:code:`<br>` Windows_Server
:code:`<br>`:code:`<br>` Possible values for Linux Server operating system are:
:code:`<br>`:code:`<br>` RHEL_BYOS (for RHEL) :code:`<br>`:code:`<br>` SLES_BYOS (for SUSE)
:code:`<br>`:code:`<br>` For more information, see `Azure Hybrid Use Benefit for Windows Server
<https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing>`_
:code:`<br>`:code:`<br>` `Azure Hybrid Use Benefit for Linux Server
<https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux>`_
:code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
:paramtype license_type: str
:keyword extensions_time_budget: Specifies the time alloted for all extensions to start. The
time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified
in ISO 8601 format. The default value is 90 minutes (PT1H30M). :code:`<br>`:code:`<br>` Minimum
api-version: 2020-06-01.
:paramtype extensions_time_budget: str
:keyword platform_fault_domain: Specifies the scale set logical fault domain into which the
Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned
to a fault domain that best maintains balance across available fault
domains.:code:`<br>`:code:`<li>`This is applicable only if the 'virtualMachineScaleSet'
property of this Virtual Machine is set.:code:`<li>`The Virtual Machine Scale Set that is
referenced, must have 'platformFaultDomainCount' > 1.:code:`<li>`This property cannot be
updated once the Virtual Machine is created.:code:`<li>`Fault domain assignment can be viewed
in the Virtual Machine Instance View.:code:`<br>`:code:`<br>`Minimum api‐version: 2020‐12‐01.
:paramtype platform_fault_domain: int
:keyword scheduled_events_profile: Specifies Scheduled Event related configurations.
:paramtype scheduled_events_profile:
~azure.mgmt.compute.v2022_03_01.models.ScheduledEventsProfile
:keyword user_data: UserData for the VM, which must be base-64 encoded. Customer should not
pass any secrets in here. :code:`<br>`:code:`<br>`Minimum api-version: 2021-03-01.
:paramtype user_data: str
:keyword capacity_reservation: Specifies information about the capacity reservation that is
used to allocate virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2021-04-01.
:paramtype capacity_reservation:
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationProfile
:keyword application_profile: Specifies the gallery applications that should be made available
to the VM/VMSS.
:paramtype application_profile: ~azure.mgmt.compute.v2022_03_01.models.ApplicationProfile
"""
super(VirtualMachineUpdate, self).__init__(tags=tags, **kwargs)
self.plan = plan
self.identity = identity
self.zones = zones
self.hardware_profile = hardware_profile
self.storage_profile = storage_profile
self.additional_capabilities = additional_capabilities
self.os_profile = os_profile
self.network_profile = network_profile
self.security_profile = security_profile
self.diagnostics_profile = diagnostics_profile
self.availability_set = availability_set
self.virtual_machine_scale_set = virtual_machine_scale_set
self.proximity_placement_group = proximity_placement_group
self.priority = priority
self.eviction_policy = eviction_policy
self.billing_profile = billing_profile
self.host = host
self.host_group = host_group
self.provisioning_state = None
self.instance_view = None
self.license_type = license_type
self.vm_id = None
self.extensions_time_budget = extensions_time_budget
self.platform_fault_domain = platform_fault_domain
self.scheduled_events_profile = scheduled_events_profile
self.user_data = user_data
self.capacity_reservation = capacity_reservation
self.application_profile = application_profile
self.time_created = None
class VMDiskSecurityProfile(msrest.serialization.Model):
    """Security profile settings for the managed disk.

    NOTE: these settings can only be set for Confidential VMs.

    :ivar security_encryption_type: EncryptionType of the managed disk:
     DiskWithVMGuestState encrypts the managed disk along with the VMGuestState
     blob; VMGuestStateOnly encrypts just the VMGuestState blob. Possible
     values include: "VMGuestStateOnly", "DiskWithVMGuestState".
    :vartype security_encryption_type: str or
     ~azure.mgmt.compute.v2022_03_01.models.SecurityEncryptionTypes
    :ivar disk_encryption_set: Customer managed disk encryption set resource id
     for the managed disk used for the Customer Managed Key encrypted
     ConfidentialVM OS Disk and VMGuest blob.
    :vartype disk_encryption_set:
     ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
    """

    _attribute_map = {
        'security_encryption_type': {'key': 'securityEncryptionType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
    }

    def __init__(
        self,
        *,
        security_encryption_type: Optional[Union[str, "SecurityEncryptionTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        """
        :keyword security_encryption_type: EncryptionType of the managed disk:
         DiskWithVMGuestState encrypts the managed disk along with the VMGuestState blob;
         VMGuestStateOnly encrypts just the VMGuestState blob. Only settable for Confidential
         VMs. Possible values include: "VMGuestStateOnly", "DiskWithVMGuestState".
        :paramtype security_encryption_type: str or
         ~azure.mgmt.compute.v2022_03_01.models.SecurityEncryptionTypes
        :keyword disk_encryption_set: Customer managed disk encryption set resource id for the
         managed disk used for the Customer Managed Key encrypted ConfidentialVM OS Disk and
         VMGuest blob.
        :paramtype disk_encryption_set:
         ~azure.mgmt.compute.v2022_03_01.models.DiskEncryptionSetParameters
        """
        super().__init__(**kwargs)
        self.security_encryption_type = security_encryption_type
        self.disk_encryption_set = disk_encryption_set
class VMGalleryApplication(msrest.serialization.Model):
    """The information required to reference a compute gallery application version.

    All required parameters must be populated in order to send to Azure.

    :ivar tags: A set of tags. Optional, a passthrough value for more generic context.
    :vartype tags: str
    :ivar order: Optional, the order in which the packages have to be installed.
    :vartype order: int
    :ivar package_reference_id: Required. The GalleryApplicationVersion resource id, of the form
     /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{application}/versions/{version}.
    :vartype package_reference_id: str
    :ivar configuration_reference: Optional, uri to an azure blob that replaces the package's
     default configuration if provided.
    :vartype configuration_reference: str
    :ivar treat_failure_as_deployment_failure: Optional, if true any failure for any operation in
     the VmApplication will fail the deployment.
    :vartype treat_failure_as_deployment_failure: bool
    :ivar enable_automatic_upgrade: If true, the application is automatically updated on the
     VM/VMSS when a new Gallery Application version is available in PIR/SIG.
    :vartype enable_automatic_upgrade: bool
    """

    _validation = {
        'package_reference_id': {'required': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'package_reference_id': {'key': 'packageReferenceId', 'type': 'str'},
        'configuration_reference': {'key': 'configurationReference', 'type': 'str'},
        'treat_failure_as_deployment_failure': {'key': 'treatFailureAsDeploymentFailure', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'enableAutomaticUpgrade', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        package_reference_id: str,
        tags: Optional[str] = None,
        order: Optional[int] = None,
        configuration_reference: Optional[str] = None,
        treat_failure_as_deployment_failure: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Optional, a passthrough value for more generic context.
        :paramtype tags: str
        :keyword order: Optional, the order in which the packages have to be installed.
        :paramtype order: int
        :keyword package_reference_id: Required. The GalleryApplicationVersion resource id, of the
         form
         /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{application}/versions/{version}.
        :paramtype package_reference_id: str
        :keyword configuration_reference: Optional, uri to an azure blob that replaces the
         package's default configuration if provided.
        :paramtype configuration_reference: str
        :keyword treat_failure_as_deployment_failure: Optional, if true any failure for any
         operation in the VmApplication will fail the deployment.
        :paramtype treat_failure_as_deployment_failure: bool
        :keyword enable_automatic_upgrade: If true, the application is automatically updated on
         the VM/VMSS when a new Gallery Application version is available in PIR/SIG.
        :paramtype enable_automatic_upgrade: bool
        """
        super().__init__(**kwargs)
        self.tags = tags
        self.order = order
        self.package_reference_id = package_reference_id
        self.configuration_reference = configuration_reference
        self.treat_failure_as_deployment_failure = treat_failure_as_deployment_failure
        self.enable_automatic_upgrade = enable_automatic_upgrade
class VmImagesInEdgeZoneListResult(msrest.serialization.Model):
    """Response of the List VmImages in EdgeZone operation.

    :ivar value: The list of VMImages in EdgeZone.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineImageResource]
    :ivar next_link: The URI to fetch the next page of VMImages in EdgeZone. Call ListNext() with
     this URI to fetch the next page of VmImages.
    :vartype next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineImageResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["VirtualMachineImageResource"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: The list of VMImages in EdgeZone.
        :paramtype value: list[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineImageResource]
        :keyword next_link: The URI to fetch the next page of VMImages in EdgeZone. Call
         ListNext() with this URI to fetch the next page of VmImages.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VMScaleSetConvertToSinglePlacementGroupInput(msrest.serialization.Model):
    """Input for converting a VM scale set to a single placement group.

    :ivar active_placement_group_id: Id of the placement group in which future virtual machine
     instances should be placed. To query the placement group Id, use the Virtual Machine Scale
     Set VMs - Get API. If not provided, the platform chooses the one with the maximum number of
     virtual machine instances.
    :vartype active_placement_group_id: str
    """

    _attribute_map = {
        'active_placement_group_id': {'key': 'activePlacementGroupId', 'type': 'str'},
    }

    def __init__(self, *, active_placement_group_id: Optional[str] = None, **kwargs):
        """
        :keyword active_placement_group_id: Id of the placement group in which future virtual
         machine instances should be placed. To query the placement group Id, use the Virtual
         Machine Scale Set VMs - Get API. If not provided, the platform chooses the one with the
         maximum number of virtual machine instances.
        :paramtype active_placement_group_id: str
        """
        super().__init__(**kwargs)
        self.active_placement_group_id = active_placement_group_id
class VMSizeProperties(msrest.serialization.Model):
    """VM Size Property settings on the virtual machine.

    :ivar v_cpus_available: The number of vCPUs available for the VM. When not specified in the
     request body, the default is the vCPUs-available value for that VM size exposed in the api
     response of `List all available virtual machine sizes in a region
     <https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list>`_ .
    :vartype v_cpus_available: int
    :ivar v_cpus_per_core: The vCPU to physical core ratio. When not specified in the request
     body, the default is the vCPUsPerCore value for the VM size exposed in the api response of
     `List all available virtual machine sizes in a region
     <https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list>`_ . Setting this
     property to 1 also means that hyper-threading is disabled.
    :vartype v_cpus_per_core: int
    """

    _attribute_map = {
        'v_cpus_available': {'key': 'vCPUsAvailable', 'type': 'int'},
        'v_cpus_per_core': {'key': 'vCPUsPerCore', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        v_cpus_available: Optional[int] = None,
        v_cpus_per_core: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword v_cpus_available: The number of vCPUs available for the VM. When not specified
         in the request body, the default is the vCPUs-available value for that VM size exposed
         in the api response of `List all available virtual machine sizes in a region
         <https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list>`_ .
        :paramtype v_cpus_available: int
        :keyword v_cpus_per_core: The vCPU to physical core ratio. When not specified in the
         request body, the default is the vCPUsPerCore value for the VM size exposed in the api
         response of `List all available virtual machine sizes in a region
         <https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list>`_ . Setting this
         property to 1 also means that hyper-threading is disabled.
        :paramtype v_cpus_per_core: int
        """
        super().__init__(**kwargs)
        self.v_cpus_available = v_cpus_available
        self.v_cpus_per_core = v_cpus_per_core
class WindowsConfiguration(msrest.serialization.Model):
    """Windows operating system settings on the virtual machine.

    :ivar provision_vm_agent: Whether the virtual machine agent should be provisioned on the
     virtual machine. When not specified in the request body the default is true, ensuring the VM
     Agent is installed so that extensions can be added to the VM later.
    :vartype provision_vm_agent: bool
    :ivar enable_automatic_updates: Whether Automatic Updates is enabled for the Windows virtual
     machine. Default value is true. For virtual machine scale sets this property can be updated,
     and updates take effect on OS reprovisioning.
    :vartype enable_automatic_updates: bool
    :ivar time_zone: Time zone of the virtual machine, e.g. "Pacific Standard Time". Possible
     values are `TimeZoneInfo.Id
     <https://docs.microsoft.com/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id>`_
     values from time zones returned by `TimeZoneInfo.GetSystemTimeZones
     <https://docs.microsoft.com/dotnet/api/system.timezoneinfo.getsystemtimezones>`_.
    :vartype time_zone: str
    :ivar additional_unattend_content: Additional base-64 encoded XML formatted information to
     include in the Unattend.xml file, which is used by Windows Setup.
    :vartype additional_unattend_content:
     list[~azure.mgmt.compute.v2022_03_01.models.AdditionalUnattendContent]
    :ivar patch_settings: [Preview Feature] Settings related to VM Guest Patching on Windows.
    :vartype patch_settings: ~azure.mgmt.compute.v2022_03_01.models.PatchSettings
    :ivar win_rm: Windows Remote Management listeners; enables remote Windows PowerShell.
    :vartype win_rm: ~azure.mgmt.compute.v2022_03_01.models.WinRMConfiguration
    """

    _attribute_map = {
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
        'patch_settings': {'key': 'patchSettings', 'type': 'PatchSettings'},
        'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
    }

    def __init__(
        self,
        *,
        provision_vm_agent: Optional[bool] = None,
        enable_automatic_updates: Optional[bool] = None,
        time_zone: Optional[str] = None,
        additional_unattend_content: Optional[List["AdditionalUnattendContent"]] = None,
        patch_settings: Optional["PatchSettings"] = None,
        win_rm: Optional["WinRMConfiguration"] = None,
        **kwargs
    ):
        """
        :keyword provision_vm_agent: Whether the virtual machine agent should be provisioned on
         the virtual machine. When not specified in the request body the default is true,
         ensuring the VM Agent is installed so that extensions can be added to the VM later.
        :paramtype provision_vm_agent: bool
        :keyword enable_automatic_updates: Whether Automatic Updates is enabled for the Windows
         virtual machine. Default value is true. For virtual machine scale sets this property can
         be updated, and updates take effect on OS reprovisioning.
        :paramtype enable_automatic_updates: bool
        :keyword time_zone: Time zone of the virtual machine, e.g. "Pacific Standard Time".
         Possible values are `TimeZoneInfo.Id
         <https://docs.microsoft.com/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id>`_
         values from time zones returned by `TimeZoneInfo.GetSystemTimeZones
         <https://docs.microsoft.com/dotnet/api/system.timezoneinfo.getsystemtimezones>`_.
        :paramtype time_zone: str
        :keyword additional_unattend_content: Additional base-64 encoded XML formatted
         information to include in the Unattend.xml file, which is used by Windows Setup.
        :paramtype additional_unattend_content:
         list[~azure.mgmt.compute.v2022_03_01.models.AdditionalUnattendContent]
        :keyword patch_settings: [Preview Feature] Settings related to VM Guest Patching on
         Windows.
        :paramtype patch_settings: ~azure.mgmt.compute.v2022_03_01.models.PatchSettings
        :keyword win_rm: Windows Remote Management listeners; enables remote Windows PowerShell.
        :paramtype win_rm: ~azure.mgmt.compute.v2022_03_01.models.WinRMConfiguration
        """
        super().__init__(**kwargs)
        self.provision_vm_agent = provision_vm_agent
        self.enable_automatic_updates = enable_automatic_updates
        self.time_zone = time_zone
        self.additional_unattend_content = additional_unattend_content
        self.patch_settings = patch_settings
        self.win_rm = win_rm
class WindowsParameters(msrest.serialization.Model):
    """Input for InstallPatches on a Windows VM, as directly received by the API.

    :ivar classifications_to_include: The update classifications to select when installing
     patches for Windows.
    :vartype classifications_to_include: list[str or
     ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchClassificationWindows]
    :ivar kb_numbers_to_include: Kbs to include in the patch operation.
    :vartype kb_numbers_to_include: list[str]
    :ivar kb_numbers_to_exclude: Kbs to exclude in the patch operation.
    :vartype kb_numbers_to_exclude: list[str]
    :ivar exclude_kbs_requiring_reboot: When true, filters out Kbs that don't have an
     InstallationRebootBehavior of 'NeverReboots'.
    :vartype exclude_kbs_requiring_reboot: bool
    :ivar max_patch_publish_date: Install only patches that were published on or before this
     given max published date.
    :vartype max_patch_publish_date: ~datetime.datetime
    """

    _attribute_map = {
        'classifications_to_include': {'key': 'classificationsToInclude', 'type': '[str]'},
        'kb_numbers_to_include': {'key': 'kbNumbersToInclude', 'type': '[str]'},
        'kb_numbers_to_exclude': {'key': 'kbNumbersToExclude', 'type': '[str]'},
        'exclude_kbs_requiring_reboot': {'key': 'excludeKbsRequiringReboot', 'type': 'bool'},
        'max_patch_publish_date': {'key': 'maxPatchPublishDate', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        classifications_to_include: Optional[List[Union[str, "VMGuestPatchClassificationWindows"]]] = None,
        kb_numbers_to_include: Optional[List[str]] = None,
        kb_numbers_to_exclude: Optional[List[str]] = None,
        exclude_kbs_requiring_reboot: Optional[bool] = None,
        max_patch_publish_date: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword classifications_to_include: The update classifications to select when installing
         patches for Windows.
        :paramtype classifications_to_include: list[str or
         ~azure.mgmt.compute.v2022_03_01.models.VMGuestPatchClassificationWindows]
        :keyword kb_numbers_to_include: Kbs to include in the patch operation.
        :paramtype kb_numbers_to_include: list[str]
        :keyword kb_numbers_to_exclude: Kbs to exclude in the patch operation.
        :paramtype kb_numbers_to_exclude: list[str]
        :keyword exclude_kbs_requiring_reboot: When true, filters out Kbs that don't have an
         InstallationRebootBehavior of 'NeverReboots'.
        :paramtype exclude_kbs_requiring_reboot: bool
        :keyword max_patch_publish_date: Install only patches that were published on or before
         this given max published date.
        :paramtype max_patch_publish_date: ~datetime.datetime
        """
        super().__init__(**kwargs)
        self.classifications_to_include = classifications_to_include
        self.kb_numbers_to_include = kb_numbers_to_include
        self.kb_numbers_to_exclude = kb_numbers_to_exclude
        self.exclude_kbs_requiring_reboot = exclude_kbs_requiring_reboot
        self.max_patch_publish_date = max_patch_publish_date
class WindowsVMGuestPatchAutomaticByPlatformSettings(msrest.serialization.Model):
    """Additional settings applied when patch mode AutomaticByPlatform is selected in Windows patch settings.

    :ivar reboot_setting: The reboot setting for all AutomaticByPlatform patch installation
     operations. Possible values include: "Unknown", "IfRequired", "Never", "Always".
    :vartype reboot_setting: str or
     ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchAutomaticByPlatformRebootSetting
    """

    _attribute_map = {
        'reboot_setting': {'key': 'rebootSetting', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        reboot_setting: Optional[Union[str, "WindowsVMGuestPatchAutomaticByPlatformRebootSetting"]] = None,
        **kwargs
    ):
        """
        :keyword reboot_setting: The reboot setting for all AutomaticByPlatform patch
         installation operations. Possible values include: "Unknown", "IfRequired", "Never",
         "Always".
        :paramtype reboot_setting: str or
         ~azure.mgmt.compute.v2022_03_01.models.WindowsVMGuestPatchAutomaticByPlatformRebootSetting
        """
        super().__init__(**kwargs)
        self.reboot_setting = reboot_setting
class WinRMConfiguration(msrest.serialization.Model):
    """Windows Remote Management configuration of the VM.

    :ivar listeners: The list of Windows Remote Management listeners.
    :vartype listeners: list[~azure.mgmt.compute.v2022_03_01.models.WinRMListener]
    """

    _attribute_map = {
        'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
    }

    def __init__(self, *, listeners: Optional[List["WinRMListener"]] = None, **kwargs):
        """
        :keyword listeners: The list of Windows Remote Management listeners.
        :paramtype listeners: list[~azure.mgmt.compute.v2022_03_01.models.WinRMListener]
        """
        super().__init__(**kwargs)
        self.listeners = listeners
class WinRMListener(msrest.serialization.Model):
    """Protocol and certificate of a single Windows Remote Management listener.

    :ivar protocol: Protocol of the WinRM listener. Possible values include: "Http", "Https".
    :vartype protocol: str or ~azure.mgmt.compute.v2022_03_01.models.ProtocolTypes
    :ivar certificate_url: URL of a certificate uploaded to Key Vault as a secret. The secret must
     be the Base64 encoding of the UTF-8 JSON object
     ``{"data": "<Base64-encoded-certificate>", "dataType": "pfx", "password": "<pfx-file-password>"}``.
     To install certificates on a virtual machine it is recommended to use the Azure Key Vault
     virtual machine extension for Linux
     (https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or for Windows
     (https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
    :vartype certificate_url: str
    """
    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        protocol: Optional[Union[str, "ProtocolTypes"]] = None,
        certificate_url: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword protocol: Protocol of the WinRM listener. Possible values include: "Http",
         "Https".
        :paramtype protocol: str or ~azure.mgmt.compute.v2022_03_01.models.ProtocolTypes
        :keyword certificate_url: URL of a certificate uploaded to Key Vault as a secret; see the
         class-level documentation for the required secret format.
        :paramtype certificate_url: str
        """
        super().__init__(**kwargs)
        self.protocol = protocol
        self.certificate_url = certificate_url
| 50.570581 | 742 | 0.692241 |
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AdditionalCapabilities(msrest.serialization.Model):
    """Capability flags enabled or disabled on a VM or VM scale set.

    :ivar ultra_ssd_enabled: Flag serialized as ``ultraSSDEnabled``.
    :vartype ultra_ssd_enabled: bool
    :ivar hibernation_enabled: Flag serialized as ``hibernationEnabled``.
    :vartype hibernation_enabled: bool
    """
    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
        'hibernation_enabled': {'key': 'hibernationEnabled', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        ultra_ssd_enabled: Optional[bool] = None,
        hibernation_enabled: Optional[bool] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(AdditionalCapabilities, self).__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
        self.hibernation_enabled = hibernation_enabled
class AdditionalUnattendContent(msrest.serialization.Model):
    """Additional unattend-setup content for a Windows VM (pass/component/setting name plus raw XML content).

    :ivar pass_name: Serialized as ``passName``.
    :vartype pass_name: str
    :ivar component_name: Serialized as ``componentName``.
    :vartype component_name: str
    :ivar setting_name: Serialized as ``settingName``; values come from the ``SettingNames`` enum.
    :vartype setting_name: str or ~azure.mgmt.compute.v2022_03_01.models.SettingNames
    :ivar content: Serialized as ``content``.
    :vartype content: str
    """
    _attribute_map = {
        'pass_name': {'key': 'passName', 'type': 'str'},
        'component_name': {'key': 'componentName', 'type': 'str'},
        'setting_name': {'key': 'settingName', 'type': 'str'},
        'content': {'key': 'content', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        pass_name: Optional[str] = None,
        component_name: Optional[str] = None,
        setting_name: Optional[Union[str, "SettingNames"]] = None,
        content: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(AdditionalUnattendContent, self).__init__(**kwargs)
        self.pass_name = pass_name
        self.component_name = component_name
        self.setting_name = setting_name
        self.content = content
class ApiEntityReference(msrest.serialization.Model):
    """Reference to another ARM entity, by resource ``id`` only.

    :ivar id: ARM resource id of the referenced entity.
    :vartype id: str
    """
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,  # NOTE: shadows the builtin ``id``; name is fixed by the generated API surface
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(ApiEntityReference, self).__init__(**kwargs)
        self.id = id
class ApiError(msrest.serialization.Model):
    """Top-level API error payload: code/target/message plus nested details and inner error.

    :ivar details: List of nested error details.
    :vartype details: list[~azure.mgmt.compute.v2022_03_01.models.ApiErrorBase]
    :ivar innererror: Inner error object.
    :vartype innererror: ~azure.mgmt.compute.v2022_03_01.models.InnerError
    :ivar code: Error code.
    :vartype code: str
    :ivar target: Target of the error.
    :vartype target: str
    :ivar message: Error message.
    :vartype message: str
    """
    _attribute_map = {
        'details': {'key': 'details', 'type': '[ApiErrorBase]'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        details: Optional[List["ApiErrorBase"]] = None,
        innererror: Optional["InnerError"] = None,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(ApiError, self).__init__(**kwargs)
        self.details = details
        self.innererror = innererror
        self.code = code
        self.target = target
        self.message = message
class ApiErrorBase(msrest.serialization.Model):
    """Single API error detail: code, target, and message.

    :ivar code: Error code.
    :vartype code: str
    :ivar target: Target of the error.
    :vartype target: str
    :ivar message: Error message.
    :vartype message: str
    """
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        target: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(ApiErrorBase, self).__init__(**kwargs)
        self.code = code
        self.target = target
        self.message = message
class ApplicationProfile(msrest.serialization.Model):
    """Profile holding the list of gallery applications assigned to a VM/VMSS.

    :ivar gallery_applications: Serialized as ``galleryApplications``.
    :vartype gallery_applications:
     list[~azure.mgmt.compute.v2022_03_01.models.VMGalleryApplication]
    """
    _attribute_map = {
        'gallery_applications': {'key': 'galleryApplications', 'type': '[VMGalleryApplication]'},
    }
    def __init__(
        self,
        *,
        gallery_applications: Optional[List["VMGalleryApplication"]] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(ApplicationProfile, self).__init__(**kwargs)
        self.gallery_applications = gallery_applications
class AutomaticOSUpgradePolicy(msrest.serialization.Model):
    """Automatic OS-upgrade policy flags for a scale set.

    :ivar enable_automatic_os_upgrade: Serialized as ``enableAutomaticOSUpgrade``.
    :vartype enable_automatic_os_upgrade: bool
    :ivar disable_automatic_rollback: Serialized as ``disableAutomaticRollback``.
    :vartype disable_automatic_rollback: bool
    :ivar use_rolling_upgrade_policy: Serialized as ``useRollingUpgradePolicy``.
    :vartype use_rolling_upgrade_policy: bool
    """
    _attribute_map = {
        'enable_automatic_os_upgrade': {'key': 'enableAutomaticOSUpgrade', 'type': 'bool'},
        'disable_automatic_rollback': {'key': 'disableAutomaticRollback', 'type': 'bool'},
        'use_rolling_upgrade_policy': {'key': 'useRollingUpgradePolicy', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        enable_automatic_os_upgrade: Optional[bool] = None,
        disable_automatic_rollback: Optional[bool] = None,
        use_rolling_upgrade_policy: Optional[bool] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(AutomaticOSUpgradePolicy, self).__init__(**kwargs)
        self.enable_automatic_os_upgrade = enable_automatic_os_upgrade
        self.disable_automatic_rollback = disable_automatic_rollback
        self.use_rolling_upgrade_policy = use_rolling_upgrade_policy
class AutomaticOSUpgradeProperties(msrest.serialization.Model):
    """Describes whether automatic OS upgrade is supported.

    All required parameters must be populated in order to send to Azure.

    :ivar automatic_os_upgrade_supported: Required. Serialized as ``automaticOSUpgradeSupported``.
    :vartype automatic_os_upgrade_supported: bool
    """
    # The single field is marked required and validated before serialization.
    _validation = {
        'automatic_os_upgrade_supported': {'required': True},
    }
    _attribute_map = {
        'automatic_os_upgrade_supported': {'key': 'automaticOSUpgradeSupported', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        automatic_os_upgrade_supported: bool,
        **kwargs
    ):
        """:keyword automatic_os_upgrade_supported: Required flag; extra ``kwargs`` pass to the base model."""
        super(AutomaticOSUpgradeProperties, self).__init__(**kwargs)
        self.automatic_os_upgrade_supported = automatic_os_upgrade_supported
class AutomaticRepairsPolicy(msrest.serialization.Model):
    """Automatic-repairs policy: on/off flag, grace period, and repair action.

    :ivar enabled: Serialized as ``enabled``.
    :vartype enabled: bool
    :ivar grace_period: Serialized as ``gracePeriod``; a string (ISO-8601 duration per service
     convention — confirm against the REST API spec).
    :vartype grace_period: str
    :ivar repair_action: Serialized as ``repairAction``; values come from the ``RepairAction`` enum.
    :vartype repair_action: str or ~azure.mgmt.compute.v2022_03_01.models.RepairAction
    """
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'grace_period': {'key': 'gracePeriod', 'type': 'str'},
        'repair_action': {'key': 'repairAction', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        grace_period: Optional[str] = None,
        repair_action: Optional[Union[str, "RepairAction"]] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(AutomaticRepairsPolicy, self).__init__(**kwargs)
        self.enabled = enabled
        self.grace_period = grace_period
        self.repair_action = repair_action
class Resource(msrest.serialization.Model):
    """Base ARM resource: read-only id/name/type, required location, optional tags.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id (read-only; server-populated).
    :vartype id: str
    :ivar name: Resource name (read-only; server-populated).
    :vartype name: str
    :ivar type: Resource type (read-only; server-populated).
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(Resource, self).__init__(**kwargs)
        # Read-only fields start as None and are filled in by deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
class AvailabilitySet(Resource):
    """Availability set resource: sku, fault/update domain counts, member VMs, proximity placement group.

    Variables marked read-only (``statuses`` plus the inherited id/name/type) are only populated by
    the server and are ignored when sending a request.

    :ivar sku: Availability-set sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_update_domain_count: Serialized as ``properties.platformUpdateDomainCount``.
    :vartype platform_update_domain_count: int
    :ivar platform_fault_domain_count: Serialized as ``properties.platformFaultDomainCount``.
    :vartype platform_fault_domain_count: int
    :ivar virtual_machines: References to the VMs in the availability set.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar proximity_placement_group: Reference to a proximity placement group.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar statuses: Read-only resource statuses.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'statuses': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """``location``/``tags`` go to the ``Resource`` base; the rest map one-to-one onto the same-named ivars."""
        super(AvailabilitySet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; populated by deserialization.
        self.statuses = None
class AvailabilitySetListResult(msrest.serialization.Model):
    """Paged List-Availability-Sets response: required ``value`` page plus optional ``nextLink``.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of availability sets.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet]
    :ivar next_link: URI to fetch the next page; ``None`` on the last page.
    :vartype next_link: str
    """
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailabilitySet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["AvailabilitySet"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(AvailabilitySetListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class UpdateResource(msrest.serialization.Model):
    """Base model for PATCH-style resource updates: only ``tags`` is settable.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """:keyword tags: Resource tags; extra ``kwargs`` pass to the base model."""
        super(UpdateResource, self).__init__(**kwargs)
        self.tags = tags
class AvailabilitySetUpdate(UpdateResource):
    """PATCH payload for an availability set; mirrors ``AvailabilitySet`` minus location.

    ``statuses`` is read-only (server-populated, ignored on requests).

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Availability-set sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_update_domain_count: Serialized as ``properties.platformUpdateDomainCount``.
    :vartype platform_update_domain_count: int
    :ivar platform_fault_domain_count: Serialized as ``properties.platformFaultDomainCount``.
    :vartype platform_fault_domain_count: int
    :ivar virtual_machines: References to the VMs in the availability set.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResource]
    :ivar proximity_placement_group: Reference to a proximity placement group.
    :vartype proximity_placement_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    :ivar statuses: Read-only resource statuses.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """
    _validation = {
        'statuses': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        platform_update_domain_count: Optional[int] = None,
        platform_fault_domain_count: Optional[int] = None,
        virtual_machines: Optional[List["SubResource"]] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """``tags`` goes to the ``UpdateResource`` base; the rest map one-to-one onto the same-named ivars."""
        super(AvailabilitySetUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        self.proximity_placement_group = proximity_placement_group
        # Read-only; populated by deserialization.
        self.statuses = None
class AvailablePatchSummary(msrest.serialization.Model):
    """Summary of the latest patch assessment. Every field is read-only (server-populated).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: Assessment status string.
    :vartype status: str
    :ivar assessment_activity_id: Serialized as ``assessmentActivityId``.
    :vartype assessment_activity_id: str
    :ivar reboot_pending: Serialized as ``rebootPending``.
    :vartype reboot_pending: bool
    :ivar critical_and_security_patch_count: Serialized as ``criticalAndSecurityPatchCount``.
    :vartype critical_and_security_patch_count: int
    :ivar other_patch_count: Serialized as ``otherPatchCount``.
    :vartype other_patch_count: int
    :ivar start_time: ISO-8601 timestamp, serialized as ``startTime``.
    :vartype start_time: ~datetime.datetime
    :ivar last_modified_time: ISO-8601 timestamp, serialized as ``lastModifiedTime``.
    :vartype last_modified_time: ~datetime.datetime
    :ivar error: Errors encountered during the assessment, if any.
    :vartype error: ~azure.mgmt.compute.v2022_03_01.models.ApiError
    """
    _validation = {
        'status': {'readonly': True},
        'assessment_activity_id': {'readonly': True},
        'reboot_pending': {'readonly': True},
        'critical_and_security_patch_count': {'readonly': True},
        'other_patch_count': {'readonly': True},
        'start_time': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'error': {'readonly': True},
    }
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'},
        'reboot_pending': {'key': 'rebootPending', 'type': 'bool'},
        'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'},
        'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(AvailablePatchSummary, self).__init__(**kwargs)
        self.status = None
        self.assessment_activity_id = None
        self.reboot_pending = None
        self.critical_and_security_patch_count = None
        self.other_patch_count = None
        self.start_time = None
        self.last_modified_time = None
        self.error = None
class BillingProfile(msrest.serialization.Model):
    """Billing details of a VM — a single ``maxPrice`` float.

    :ivar max_price: Serialized as ``maxPrice``.
    :vartype max_price: float
    """
    _attribute_map = {
        'max_price': {'key': 'maxPrice', 'type': 'float'},
    }
    def __init__(
        self,
        *,
        max_price: Optional[float] = None,
        **kwargs
    ):
        """:keyword max_price: Max price value; extra ``kwargs`` pass to the base model."""
        super(BillingProfile, self).__init__(**kwargs)
        self.max_price = max_price
class BootDiagnostics(msrest.serialization.Model):
    """Boot-diagnostics settings: enabled flag plus storage URI.

    :ivar enabled: Serialized as ``enabled``.
    :vartype enabled: bool
    :ivar storage_uri: Serialized as ``storageUri``.
    :vartype storage_uri: str
    """
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'storage_uri': {'key': 'storageUri', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        storage_uri: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(BootDiagnostics, self).__init__(**kwargs)
        self.enabled = enabled
        self.storage_uri = storage_uri
class BootDiagnosticsInstanceView(msrest.serialization.Model):
    """Instance view of boot diagnostics. Every field is read-only (server-populated).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar console_screenshot_blob_uri: Serialized as ``consoleScreenshotBlobUri``.
    :vartype console_screenshot_blob_uri: str
    :ivar serial_console_log_blob_uri: Serialized as ``serialConsoleLogBlobUri``.
    :vartype serial_console_log_blob_uri: str
    :ivar status: Status of the boot diagnostics.
    :vartype status: ~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus
    """
    _validation = {
        'console_screenshot_blob_uri': {'readonly': True},
        'serial_console_log_blob_uri': {'readonly': True},
        'status': {'readonly': True},
    }
    _attribute_map = {
        'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
        'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(BootDiagnosticsInstanceView, self).__init__(**kwargs)
        self.console_screenshot_blob_uri = None
        self.serial_console_log_blob_uri = None
        self.status = None
class CapacityReservation(Resource):
    """Capacity reservation resource. Requires ``location`` and ``sku``; most properties are read-only.

    Variables marked read-only are only populated by the server and are ignored when sending a
    request.

    :ivar sku: Required. Reservation sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar zones: Availability zones.
    :vartype zones: list[str]
    :ivar reservation_id: Read-only, serialized as ``properties.reservationId``.
    :vartype reservation_id: str
    :ivar virtual_machines_associated: Read-only list of associated VMs.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar provisioning_time: Read-only ISO-8601 timestamp.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: Read-only provisioning state.
    :vartype provisioning_state: str
    :ivar instance_view: Read-only instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceView
    :ivar time_created: Read-only ISO-8601 creation timestamp.
    :vartype time_created: ~datetime.datetime
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'reservation_id': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'reservation_id': {'key': 'properties.reservationId', 'type': 'str'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        **kwargs
    ):
        """``location``/``tags`` go to the ``Resource`` base; ``sku`` is required, ``zones`` optional."""
        super(CapacityReservation, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.zones = zones
        # Read-only fields start as None and are filled in by deserialization.
        self.reservation_id = None
        self.virtual_machines_associated = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class CapacityReservationGroup(Resource):
    """Capacity reservation group resource; member reservations and associated VMs are read-only.

    Variables marked read-only are only populated by the server and are ignored when sending a
    request.

    :ivar zones: Availability zones.
    :vartype zones: list[str]
    :ivar capacity_reservations: Read-only list of member reservations.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar virtual_machines_associated: Read-only list of associated VMs.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: Read-only instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupInstanceView
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'capacity_reservations': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'capacity_reservations': {'key': 'properties.capacityReservations', 'type': '[SubResourceReadOnly]'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationGroupInstanceView'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        **kwargs
    ):
        """``location``/``tags`` go to the ``Resource`` base; ``zones`` is the only other settable field."""
        super(CapacityReservationGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        # Read-only fields start as None and are filled in by deserialization.
        self.capacity_reservations = None
        self.virtual_machines_associated = None
        self.instance_view = None
class CapacityReservationGroupInstanceView(msrest.serialization.Model):
    """Instance view of a capacity reservation group; its single field is read-only.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar capacity_reservations: Read-only instance views of the member reservations.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceViewWithName]
    """
    _validation = {
        'capacity_reservations': {'readonly': True},
    }
    _attribute_map = {
        'capacity_reservations': {'key': 'capacityReservations', 'type': '[CapacityReservationInstanceViewWithName]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(CapacityReservationGroupInstanceView, self).__init__(**kwargs)
        self.capacity_reservations = None
class CapacityReservationGroupListResult(msrest.serialization.Model):
    """Paged list of capacity reservation groups: required ``value`` page plus optional ``nextLink``.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of capacity reservation groups.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup]
    :ivar next_link: URI to fetch the next page; ``None`` on the last page.
    :vartype next_link: str
    """
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[CapacityReservationGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["CapacityReservationGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(CapacityReservationGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class CapacityReservationGroupUpdate(UpdateResource):
    """PATCH payload for a capacity reservation group; only ``tags`` is settable, the rest is read-only.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar capacity_reservations: Read-only list of member reservations.
    :vartype capacity_reservations:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar virtual_machines_associated: Read-only list of associated VMs.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: Read-only instance view.
    :vartype instance_view:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupInstanceView
    """
    _validation = {
        'capacity_reservations': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'capacity_reservations': {'key': 'properties.capacityReservations', 'type': '[SubResourceReadOnly]'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationGroupInstanceView'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """:keyword tags: Resource tags (passed to the ``UpdateResource`` base)."""
        super(CapacityReservationGroupUpdate, self).__init__(tags=tags, **kwargs)
        # Read-only fields start as None and are filled in by deserialization.
        self.capacity_reservations = None
        self.virtual_machines_associated = None
        self.instance_view = None
class CapacityReservationInstanceView(msrest.serialization.Model):
    """Instance view of a capacity reservation: utilization info plus status list.

    :ivar utilization_info: Serialized as ``utilizationInfo``.
    :vartype utilization_info:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
    :ivar statuses: Resource statuses.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    """
    _attribute_map = {
        'utilization_info': {'key': 'utilizationInfo', 'type': 'CapacityReservationUtilization'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        utilization_info: Optional["CapacityReservationUtilization"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(CapacityReservationInstanceView, self).__init__(**kwargs)
        self.utilization_info = utilization_info
        self.statuses = statuses
class CapacityReservationInstanceViewWithName(CapacityReservationInstanceView):
    """Capacity reservation instance view extended with a read-only ``name`` field.

    :ivar utilization_info: Serialized as ``utilizationInfo``.
    :vartype utilization_info:
     ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationUtilization
    :ivar statuses: Resource statuses.
    :vartype statuses: list[~azure.mgmt.compute.v2022_03_01.models.InstanceViewStatus]
    :ivar name: Read-only reservation name (server-populated).
    :vartype name: str
    """
    _validation = {
        'name': {'readonly': True},
    }
    _attribute_map = {
        'utilization_info': {'key': 'utilizationInfo', 'type': 'CapacityReservationUtilization'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'name': {'key': 'name', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        utilization_info: Optional["CapacityReservationUtilization"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        """Forwards both keywords to the base instance view; ``name`` is read-only and starts as None."""
        super(CapacityReservationInstanceViewWithName, self).__init__(utilization_info=utilization_info, statuses=statuses, **kwargs)
        self.name = None
class CapacityReservationListResult(msrest.serialization.Model):
    """Paged list of capacity reservations: required ``value`` page plus optional ``nextLink``.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of capacity reservations.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.CapacityReservation]
    :ivar next_link: URI to fetch the next page; ``None`` on the last page.
    :vartype next_link: str
    """
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[CapacityReservation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["CapacityReservation"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(CapacityReservationListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class CapacityReservationProfile(msrest.serialization.Model):
    """Profile referencing the capacity reservation group a VM/VMSS should allocate from.

    :ivar capacity_reservation_group: Serialized as ``capacityReservationGroup``.
    :vartype capacity_reservation_group: ~azure.mgmt.compute.v2022_03_01.models.SubResource
    """
    _attribute_map = {
        'capacity_reservation_group': {'key': 'capacityReservationGroup', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        capacity_reservation_group: Optional["SubResource"] = None,
        **kwargs
    ):
        """:keyword capacity_reservation_group: Group reference; extra ``kwargs`` pass to the base model."""
        super(CapacityReservationProfile, self).__init__(**kwargs)
        self.capacity_reservation_group = capacity_reservation_group
class CapacityReservationUpdate(UpdateResource):
    """PATCH payload for a capacity reservation; ``tags`` and ``sku`` are settable, the rest is read-only.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar sku: Reservation sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar reservation_id: Read-only, serialized as ``properties.reservationId``.
    :vartype reservation_id: str
    :ivar virtual_machines_associated: Read-only list of associated VMs.
    :vartype virtual_machines_associated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar provisioning_time: Read-only ISO-8601 timestamp.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: Read-only provisioning state.
    :vartype provisioning_state: str
    :ivar instance_view: Read-only instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationInstanceView
    :ivar time_created: Read-only ISO-8601 creation timestamp.
    :vartype time_created: ~datetime.datetime
    """
    _validation = {
        'reservation_id': {'readonly': True},
        'virtual_machines_associated': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'reservation_id': {'key': 'properties.reservationId', 'type': 'str'},
        'virtual_machines_associated': {'key': 'properties.virtualMachinesAssociated', 'type': '[SubResourceReadOnly]'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'CapacityReservationInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        **kwargs
    ):
        """``tags`` goes to the ``UpdateResource`` base; ``sku`` is the only other settable field."""
        super(CapacityReservationUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        # Read-only fields start as None and are filled in by deserialization.
        self.reservation_id = None
        self.virtual_machines_associated = None
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class CapacityReservationUtilization(msrest.serialization.Model):
    """Utilization of a capacity reservation; its single field is read-only.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar virtual_machines_allocated: Read-only, serialized as ``virtualMachinesAllocated``.
    :vartype virtual_machines_allocated:
     list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    """
    _validation = {
        'virtual_machines_allocated': {'readonly': True},
    }
    _attribute_map = {
        'virtual_machines_allocated': {'key': 'virtualMachinesAllocated', 'type': '[SubResourceReadOnly]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(CapacityReservationUtilization, self).__init__(**kwargs)
        self.virtual_machines_allocated = None
class ComputeOperationListResult(msrest.serialization.Model):
    """List-Compute-Operations response; the ``value`` list is read-only.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Read-only list of compute operations.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.ComputeOperationValue]
    """
    _validation = {
        'value': {'readonly': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ComputeOperationValue]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(ComputeOperationListResult, self).__init__(**kwargs)
        self.value = None
class ComputeOperationValue(msrest.serialization.Model):
    """Description of a single compute API operation. Every field is read-only (server-populated).

    Variables are only populated by the server, and will be ignored when sending a request.
    Note the ``display.*`` wire keys: operation/resource/description/provider are flattened out of
    a nested ``display`` object.

    :ivar origin: Origin of the operation.
    :vartype origin: str
    :ivar name: Operation name.
    :vartype name: str
    :ivar operation: Display name of the operation (wire key ``display.operation``).
    :vartype operation: str
    :ivar resource: Display name of the resource (wire key ``display.resource``).
    :vartype resource: str
    :ivar description: Operation description (wire key ``display.description``).
    :vartype description: str
    :ivar provider: Resource provider (wire key ``display.provider``).
    :vartype provider: str
    """
    _validation = {
        'origin': {'readonly': True},
        'name': {'readonly': True},
        'operation': {'readonly': True},
        'resource': {'readonly': True},
        'description': {'readonly': True},
        'provider': {'readonly': True},
    }
    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'operation': {'key': 'display.operation', 'type': 'str'},
        'resource': {'key': 'display.resource', 'type': 'str'},
        'description': {'key': 'display.description', 'type': 'str'},
        'provider': {'key': 'display.provider', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(ComputeOperationValue, self).__init__(**kwargs)
        self.origin = None
        self.name = None
        self.operation = None
        self.resource = None
        self.description = None
        self.provider = None
class DataDisk(msrest.serialization.Model):
    """Data disk of a virtual machine. ``lun`` and ``create_option`` are required; the two
    IOPS/MBps fields are read-only (server-populated).

    :ivar lun: Required. Logical unit number of the disk.
    :vartype lun: int
    :ivar name: Disk name.
    :vartype name: str
    :ivar vhd: The virtual hard disk.
    :vartype vhd: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar image: The source user image virtual hard disk.
    :vartype image: ~azure.mgmt.compute.v2022_03_01.models.VirtualHardDisk
    :ivar caching: Caching type; values from ``CachingTypes``.
    :vartype caching: str or ~azure.mgmt.compute.v2022_03_01.models.CachingTypes
    :ivar write_accelerator_enabled: Serialized as ``writeAcceleratorEnabled``.
    :vartype write_accelerator_enabled: bool
    :ivar create_option: Required. How the disk is created; values from ``DiskCreateOptionTypes``.
    :vartype create_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskCreateOptionTypes
    :ivar disk_size_gb: Serialized as ``diskSizeGB``.
    :vartype disk_size_gb: int
    :ivar managed_disk: Managed disk parameters.
    :vartype managed_disk: ~azure.mgmt.compute.v2022_03_01.models.ManagedDiskParameters
    :ivar to_be_detached: Serialized as ``toBeDetached``.
    :vartype to_be_detached: bool
    :ivar disk_iops_read_write: Read-only, serialized as ``diskIOPSReadWrite``.
    :vartype disk_iops_read_write: long
    :ivar disk_m_bps_read_write: Read-only, serialized as ``diskMBpsReadWrite``.
    :vartype disk_m_bps_read_write: long
    :ivar detach_option: Values from ``DiskDetachOptionTypes``.
    :vartype detach_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDetachOptionTypes
    :ivar delete_option: Values from ``DiskDeleteOptionTypes``.
    :vartype delete_option: str or ~azure.mgmt.compute.v2022_03_01.models.DiskDeleteOptionTypes
    """
    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
        'disk_iops_read_write': {'readonly': True},
        'disk_m_bps_read_write': {'readonly': True},
    }
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'to_be_detached': {'key': 'toBeDetached', 'type': 'bool'},
        'disk_iops_read_write': {'key': 'diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'diskMBpsReadWrite', 'type': 'long'},
        'detach_option': {'key': 'detachOption', 'type': 'str'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        to_be_detached: Optional[bool] = None,
        detach_option: Optional[Union[str, "DiskDetachOptionTypes"]] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(DataDisk, self).__init__(**kwargs)
        self.lun = lun
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.to_be_detached = to_be_detached
        # Read-only; populated by deserialization.
        self.disk_iops_read_write = None
        self.disk_m_bps_read_write = None
        self.detach_option = detach_option
        self.delete_option = delete_option
class DataDiskImage(msrest.serialization.Model):
    """Data disk image description; its single ``lun`` field is read-only.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar lun: Read-only logical unit number.
    :vartype lun: int
    """
    _validation = {
        'lun': {'readonly': True},
    }
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """All fields are read-only; they start as None and are filled in by deserialization."""
        super(DataDiskImage, self).__init__(**kwargs)
        self.lun = None
class DedicatedHost(Resource):
    """Dedicated host resource. Requires ``location`` and ``sku``; host id, VM list, provisioning
    info, instance view and creation time are read-only.

    :ivar sku: Required. Host sku.
    :vartype sku: ~azure.mgmt.compute.v2022_03_01.models.Sku
    :ivar platform_fault_domain: Fault domain of the host (minimum 0, per ``_validation``).
    :vartype platform_fault_domain: int
    :ivar auto_replace_on_failure: Serialized as ``properties.autoReplaceOnFailure``.
    :vartype auto_replace_on_failure: bool
    :ivar host_id: Read-only, serialized as ``properties.hostId``.
    :vartype host_id: str
    :ivar virtual_machines: Read-only list of VMs on the host.
    :vartype virtual_machines: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar license_type: Values from ``DedicatedHostLicenseTypes``.
    :vartype license_type: str or ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostLicenseTypes
    :ivar provisioning_time: Read-only ISO-8601 timestamp.
    :vartype provisioning_time: ~datetime.datetime
    :ivar provisioning_state: Read-only provisioning state.
    :vartype provisioning_state: str
    :ivar instance_view: Read-only instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceView
    :ivar time_created: Read-only ISO-8601 creation timestamp.
    :vartype time_created: ~datetime.datetime
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'platform_fault_domain': {'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        """``location``/``tags`` go to the ``Resource`` base; the rest map one-to-one onto the same-named ivars."""
        super(DedicatedHost, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        # Read-only fields start as None and are filled in by deserialization.
        self.host_id = None
        self.virtual_machines = None
        self.license_type = license_type
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class DedicatedHostAllocatableVM(msrest.serialization.Model):
    """A VM size and the number of instances of that size allocatable on a dedicated host.

    :ivar vm_size: Serialized as ``vmSize``.
    :vartype vm_size: str
    :ivar count: Serialized as ``count`` (a float on the wire).
    :vartype count: float
    """
    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'count': {'key': 'count', 'type': 'float'},
    }
    def __init__(
        self,
        *,
        vm_size: Optional[str] = None,
        count: Optional[float] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(DedicatedHostAllocatableVM, self).__init__(**kwargs)
        self.vm_size = vm_size
        self.count = count
class DedicatedHostAvailableCapacity(msrest.serialization.Model):
    """Unutilized capacity of a dedicated host, as a list of allocatable VM sizes.

    :ivar allocatable_v_ms: Serialized as ``allocatableVMs``.
    :vartype allocatable_v_ms:
     list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostAllocatableVM]
    """
    _attribute_map = {
        'allocatable_v_ms': {'key': 'allocatableVMs', 'type': '[DedicatedHostAllocatableVM]'},
    }
    def __init__(
        self,
        *,
        allocatable_v_ms: Optional[List["DedicatedHostAllocatableVM"]] = None,
        **kwargs
    ):
        """:keyword allocatable_v_ms: Allocatable VM list; extra ``kwargs`` pass to the base model."""
        super(DedicatedHostAvailableCapacity, self).__init__(**kwargs)
        self.allocatable_v_ms = allocatable_v_ms
class DedicatedHostGroup(Resource):
    """Dedicated host group resource; member hosts and instance view are read-only.

    :ivar zones: Availability zones.
    :vartype zones: list[str]
    :ivar platform_fault_domain_count: Number of fault domains (minimum 1, per ``_validation``).
    :vartype platform_fault_domain_count: int
    :ivar hosts: Read-only list of hosts in the group.
    :vartype hosts: list[~azure.mgmt.compute.v2022_03_01.models.SubResourceReadOnly]
    :ivar instance_view: Read-only instance view.
    :vartype instance_view: ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupInstanceView
    :ivar support_automatic_placement: Serialized as ``properties.supportAutomaticPlacement``.
    :vartype support_automatic_placement: bool
    :ivar additional_capabilities: Additional capabilities of the group.
    :vartype additional_capabilities:
     ~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroupPropertiesAdditionalCapabilities
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'platform_fault_domain_count': {'minimum': 1},
        'hosts': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostGroupInstanceView'},
        'support_automatic_placement': {'key': 'properties.supportAutomaticPlacement', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'DedicatedHostGroupPropertiesAdditionalCapabilities'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        support_automatic_placement: Optional[bool] = None,
        additional_capabilities: Optional["DedicatedHostGroupPropertiesAdditionalCapabilities"] = None,
        **kwargs
    ):
        """``location``/``tags`` go to the ``Resource`` base; the rest map one-to-one onto the same-named ivars."""
        super(DedicatedHostGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Read-only; populated by deserialization.
        self.hosts = None
        self.instance_view = None
        self.support_automatic_placement = support_automatic_placement
        self.additional_capabilities = additional_capabilities
class DedicatedHostGroupInstanceView(msrest.serialization.Model):
    """Instance view of a dedicated host group: named instance views of the member hosts.

    :ivar hosts: Instance views of the hosts, each including the host name.
    :vartype hosts:
     list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostInstanceViewWithName]
    """
    _attribute_map = {
        'hosts': {'key': 'hosts', 'type': '[DedicatedHostInstanceViewWithName]'},
    }
    def __init__(
        self,
        *,
        hosts: Optional[List["DedicatedHostInstanceViewWithName"]] = None,
        **kwargs
    ):
        """:keyword hosts: Host instance views; extra ``kwargs`` pass to the base model."""
        super(DedicatedHostGroupInstanceView, self).__init__(**kwargs)
        self.hosts = hosts
class DedicatedHostGroupListResult(msrest.serialization.Model):
    """Paged list of dedicated host groups: required ``value`` page plus optional ``nextLink``.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. The list of dedicated host groups.
    :vartype value: list[~azure.mgmt.compute.v2022_03_01.models.DedicatedHostGroup]
    :ivar next_link: URI to fetch the next page; ``None`` on the last page.
    :vartype next_link: str
    """
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHostGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["DedicatedHostGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments map one-to-one onto the same-named ivars; extra ``kwargs`` pass to the base model."""
        super(DedicatedHostGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostGroupPropertiesAdditionalCapabilities(msrest.serialization.Model):
    """Additional capability flags of a dedicated host group — a single Ultra SSD flag.

    :ivar ultra_ssd_enabled: Serialized as ``ultraSSDEnabled``.
    :vartype ultra_ssd_enabled: bool
    """
    _attribute_map = {
        'ultra_ssd_enabled': {'key': 'ultraSSDEnabled', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        ultra_ssd_enabled: Optional[bool] = None,
        **kwargs
    ):
        """:keyword ultra_ssd_enabled: Ultra SSD flag; extra ``kwargs`` pass to the base model."""
        super(DedicatedHostGroupPropertiesAdditionalCapabilities, self).__init__(**kwargs)
        self.ultra_ssd_enabled = ultra_ssd_enabled
class DedicatedHostGroupUpdate(UpdateResource):
    """PATCH-style update payload for a dedicated host group.

    Mirrors DedicatedHostGroup minus the required ``location``; ``hosts`` and
    ``instance_view`` are readonly (server-populated only).

    :param tags: Resource tags.
    :param zones: Availability-zone names for the group.
    :param platform_fault_domain_count: Number of fault domains (minimum 1 per ``_validation``).
    :param support_automatic_placement: Whether VMs may be placed on hosts automatically.
    :param additional_capabilities: Extra capability flags for the group.
    """

    _validation = {
        'platform_fault_domain_count': {'minimum': 1},
        'hosts': {'readonly': True},
        'instance_view': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'hosts': {'key': 'properties.hosts', 'type': '[SubResourceReadOnly]'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostGroupInstanceView'},
        'support_automatic_placement': {'key': 'properties.supportAutomaticPlacement', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'DedicatedHostGroupPropertiesAdditionalCapabilities'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        platform_fault_domain_count: Optional[int] = None,
        support_automatic_placement: Optional[bool] = None,
        additional_capabilities: Optional["DedicatedHostGroupPropertiesAdditionalCapabilities"] = None,
        **kwargs
    ):
        super(DedicatedHostGroupUpdate, self).__init__(tags=tags, **kwargs)
        self.zones = zones
        self.platform_fault_domain_count = platform_fault_domain_count
        # Readonly fields: only filled in by deserialization of a response.
        self.hosts = None
        self.instance_view = None
        self.support_automatic_placement = support_automatic_placement
        self.additional_capabilities = additional_capabilities
class DedicatedHostInstanceView(msrest.serialization.Model):
    """Instance view of a dedicated host.

    :param available_capacity: Remaining capacity of the host.
    :param statuses: Resource status entries.
    """

    _validation = {
        'asset_id': {'readonly': True},
    }

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(DedicatedHostInstanceView, self).__init__(**kwargs)
        # asset_id is readonly: server-populated only.
        self.asset_id = None
        self.available_capacity = available_capacity
        self.statuses = statuses
class DedicatedHostInstanceViewWithName(DedicatedHostInstanceView):
    """Dedicated host instance view extended with the (readonly) host name.

    :param available_capacity: Remaining capacity of the host.
    :param statuses: Resource status entries.
    """

    _validation = {
        'asset_id': {'readonly': True},
        'name': {'readonly': True},
    }

    _attribute_map = {
        'asset_id': {'key': 'assetId', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'DedicatedHostAvailableCapacity'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        available_capacity: Optional["DedicatedHostAvailableCapacity"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(DedicatedHostInstanceViewWithName, self).__init__(available_capacity=available_capacity, statuses=statuses, **kwargs)
        # name is readonly: server-populated only.
        self.name = None
class DedicatedHostListResult(msrest.serialization.Model):
    """Paged list of DedicatedHost resources.

    :param value: Required. The current page of dedicated hosts.
    :param next_link: Link to the next page of results, when present.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DedicatedHost]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["DedicatedHost"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(DedicatedHostListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DedicatedHostUpdate(UpdateResource):
    """PATCH-style update payload for a dedicated host.

    Only ``tags``, ``platform_fault_domain``, ``auto_replace_on_failure`` and
    ``license_type`` are client-settable; the remaining fields are readonly and
    populated only from server responses.

    :param tags: Resource tags.
    :param platform_fault_domain: Fault domain of the host (minimum 0 per ``_validation``).
    :param auto_replace_on_failure: Whether the host is replaced automatically on failure.
    :param license_type: License type applied to VMs on the host.
    """

    _validation = {
        'platform_fault_domain': {'minimum': 0},
        'host_id': {'readonly': True},
        'virtual_machines': {'readonly': True},
        'provisioning_time': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'time_created': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'auto_replace_on_failure': {'key': 'properties.autoReplaceOnFailure', 'type': 'bool'},
        'host_id': {'key': 'properties.hostId', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceReadOnly]'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'provisioning_time': {'key': 'properties.provisioningTime', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'DedicatedHostInstanceView'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        platform_fault_domain: Optional[int] = None,
        auto_replace_on_failure: Optional[bool] = None,
        license_type: Optional[Union[str, "DedicatedHostLicenseTypes"]] = None,
        **kwargs
    ):
        super(DedicatedHostUpdate, self).__init__(tags=tags, **kwargs)
        self.platform_fault_domain = platform_fault_domain
        self.auto_replace_on_failure = auto_replace_on_failure
        # Readonly fields below: only filled in by deserialization of a response.
        self.host_id = None
        self.virtual_machines = None
        self.license_type = license_type
        self.provisioning_time = None
        self.provisioning_state = None
        self.instance_view = None
        self.time_created = None
class DiagnosticsProfile(msrest.serialization.Model):
    """Diagnostic settings container for a virtual machine.

    :param boot_diagnostics: Boot diagnostics settings.
    """

    _attribute_map = {
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnostics'},
    }

    def __init__(
        self,
        *,
        boot_diagnostics: Optional["BootDiagnostics"] = None,
        **kwargs
    ):
        super(DiagnosticsProfile, self).__init__(**kwargs)
        self.boot_diagnostics = boot_diagnostics
class DiffDiskSettings(msrest.serialization.Model):
    """Ephemeral (diff) disk settings for an OS disk.

    :param option: Ephemeral disk option (``DiffDiskOptions`` value or its string form).
    :param placement: Ephemeral disk placement (``DiffDiskPlacement`` value or its string form).
    """

    _attribute_map = {
        'option': {'key': 'option', 'type': 'str'},
        'placement': {'key': 'placement', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        option: Optional[Union[str, "DiffDiskOptions"]] = None,
        placement: Optional[Union[str, "DiffDiskPlacement"]] = None,
        **kwargs
    ):
        super(DiffDiskSettings, self).__init__(**kwargs)
        self.option = option
        self.placement = placement
class DisallowedConfiguration(msrest.serialization.Model):
    """Describes a disallowed configuration.

    :param vm_disk_type: Disallowed VM disk type (``VmDiskTypes`` value or its string form).
    """

    _attribute_map = {
        'vm_disk_type': {'key': 'vmDiskType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        vm_disk_type: Optional[Union[str, "VmDiskTypes"]] = None,
        **kwargs
    ):
        super(DisallowedConfiguration, self).__init__(**kwargs)
        self.vm_disk_type = vm_disk_type
class SubResource(msrest.serialization.Model):
    """Reference to another Azure resource by ARM id.

    :param id: Resource id. (The name intentionally shadows the builtin ``id``
        to match the wire contract; do not rename.)
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(SubResource, self).__init__(**kwargs)
        self.id = id
class DiskEncryptionSetParameters(SubResource):
    """Reference to a disk encryption set (a SubResource: id only).

    :param id: Resource id of the disk encryption set.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(DiskEncryptionSetParameters, self).__init__(id=id, **kwargs)
class DiskEncryptionSettings(msrest.serialization.Model):
    """Encryption settings for a disk.

    :param disk_encryption_key: Key Vault secret holding the disk encryption key.
    :param key_encryption_key: Key Vault key used to wrap the disk encryption key.
    :param enabled: Whether encryption is enabled on the disk.
    """

    _attribute_map = {
        'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultSecretReference'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultKeyReference'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        disk_encryption_key: Optional["KeyVaultSecretReference"] = None,
        key_encryption_key: Optional["KeyVaultKeyReference"] = None,
        enabled: Optional[bool] = None,
        **kwargs
    ):
        super(DiskEncryptionSettings, self).__init__(**kwargs)
        self.disk_encryption_key = disk_encryption_key
        self.key_encryption_key = key_encryption_key
        self.enabled = enabled
class DiskInstanceView(msrest.serialization.Model):
    """Instance view of a disk.

    :param name: Disk name.
    :param encryption_settings: Encryption settings entries for the disk.
    :param statuses: Resource status entries.
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': '[DiskEncryptionSettings]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        encryption_settings: Optional[List["DiskEncryptionSettings"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(DiskInstanceView, self).__init__(**kwargs)
        self.name = name
        self.encryption_settings = encryption_settings
        self.statuses = statuses
class DiskRestorePointInstanceView(msrest.serialization.Model):
    """Instance view of a disk restore point.

    :param id: Resource id of the restore point (shadows builtin ``id`` per wire contract).
    :param replication_status: Replication status of the restore point.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'replication_status': {'key': 'replicationStatus', 'type': 'DiskRestorePointReplicationStatus'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        replication_status: Optional["DiskRestorePointReplicationStatus"] = None,
        **kwargs
    ):
        super(DiskRestorePointInstanceView, self).__init__(**kwargs)
        self.id = id
        self.replication_status = replication_status
class DiskRestorePointReplicationStatus(msrest.serialization.Model):
    """Replication status of a disk restore point.

    :param status: Status entry describing the replication state.
    :param completion_percent: Replication completion percentage.
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
        'completion_percent': {'key': 'completionPercent', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        status: Optional["InstanceViewStatus"] = None,
        completion_percent: Optional[int] = None,
        **kwargs
    ):
        super(DiskRestorePointReplicationStatus, self).__init__(**kwargs)
        self.status = status
        self.completion_percent = completion_percent
class ExtendedLocation(msrest.serialization.Model):
    """Complex type describing an extended location.

    :param name: Name of the extended location.
    :param type: Type of the extended location (``ExtendedLocationTypes`` value or its string
        form; shadows builtin ``type`` per wire contract).
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        type: Optional[Union[str, "ExtendedLocationTypes"]] = None,
        **kwargs
    ):
        super(ExtendedLocation, self).__init__(**kwargs)
        self.name = name
        self.type = type
class HardwareProfile(msrest.serialization.Model):
    """Hardware settings for a virtual machine.

    :param vm_size: VM size (``VirtualMachineSizeTypes`` value or its string form).
    :param vm_size_properties: Custom VM size properties.
    """

    _attribute_map = {
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'vm_size_properties': {'key': 'vmSizeProperties', 'type': 'VMSizeProperties'},
    }

    def __init__(
        self,
        *,
        vm_size: Optional[Union[str, "VirtualMachineSizeTypes"]] = None,
        vm_size_properties: Optional["VMSizeProperties"] = None,
        **kwargs
    ):
        super(HardwareProfile, self).__init__(**kwargs)
        self.vm_size = vm_size
        self.vm_size_properties = vm_size_properties
class Image(Resource):
    """A user-created VM image resource.

    ``id``/``name``/``type``/``provisioning_state`` are readonly (server-populated).

    :param location: Required. Resource location (forwarded to the base resource).
    :param tags: Resource tags.
    :param extended_location: Extended location of the image.
    :param source_virtual_machine: The VM the image was captured from.
    :param storage_profile: Storage settings of the image's disks.
    :param hyper_v_generation: Hyper-V generation (``HyperVGenerationTypes`` value or string).
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        super(Image, self).__init__(location=location, tags=tags, **kwargs)
        self.extended_location = extended_location
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Readonly: filled in only when deserializing a server response.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class ImageDisk(msrest.serialization.Model):
    """Common disk settings shared by image OS disks and data disks.

    :param snapshot: Snapshot the disk is based on.
    :param managed_disk: Managed disk the disk is based on.
    :param blob_uri: URI of the source VHD blob.
    :param caching: Caching mode (``CachingTypes`` value or string).
    :param disk_size_gb: Disk size in GB (wire key ``diskSizeGB``).
    :param storage_account_type: Storage account type (``StorageAccountTypes`` value or string).
    :param disk_encryption_set: Disk encryption set reference.
    """

    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
    }

    def __init__(
        self,
        *,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        super(ImageDisk, self).__init__(**kwargs)
        self.snapshot = snapshot
        self.managed_disk = managed_disk
        self.blob_uri = blob_uri
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.storage_account_type = storage_account_type
        self.disk_encryption_set = disk_encryption_set
class ImageDataDisk(ImageDisk):
    """Data disk of an image; extends ImageDisk with a required ``lun``.

    :param lun: Required. Logical unit number of the data disk.
    """

    _validation = {
        'lun': {'required': True},
    }

    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'lun': {'key': 'lun', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        lun: int,
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        super(ImageDataDisk, self).__init__(snapshot=snapshot, managed_disk=managed_disk, blob_uri=blob_uri, caching=caching, disk_size_gb=disk_size_gb, storage_account_type=storage_account_type, disk_encryption_set=disk_encryption_set, **kwargs)
        self.lun = lun
class ImageListResult(msrest.serialization.Model):
    """Paged list of Image resources.

    :param value: Required. The current page of images.
    :param next_link: Link to the next page of results, when present.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Image]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["Image"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(ImageListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ImageOSDisk(ImageDisk):
    """OS disk of an image; extends ImageDisk with required OS type/state.

    :param os_type: Required. Operating system type (``OperatingSystemTypes`` value or string).
    :param os_state: Required. OS state (``OperatingSystemStateTypes`` value or string).
    """

    _validation = {
        'os_type': {'required': True},
        'os_state': {'required': True},
    }

    _attribute_map = {
        'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
        'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
        'blob_uri': {'key': 'blobUri', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'os_state': {'key': 'osState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        os_type: Union[str, "OperatingSystemTypes"],
        os_state: Union[str, "OperatingSystemStateTypes"],
        snapshot: Optional["SubResource"] = None,
        managed_disk: Optional["SubResource"] = None,
        blob_uri: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        super(ImageOSDisk, self).__init__(snapshot=snapshot, managed_disk=managed_disk, blob_uri=blob_uri, caching=caching, disk_size_gb=disk_size_gb, storage_account_type=storage_account_type, disk_encryption_set=disk_encryption_set, **kwargs)
        self.os_type = os_type
        self.os_state = os_state
class ImageReference(SubResource):
    """Reference to a platform or gallery image, by id or by publisher/offer/sku/version.

    ``exact_version`` is readonly (server-populated).

    :param id: Resource id of the image.
    :param publisher: Image publisher.
    :param offer: Image offer.
    :param sku: Image SKU.
    :param version: Image version.
    :param shared_gallery_image_id: Shared gallery image unique id.
    :param community_gallery_image_id: Community gallery image unique id.
    """

    _validation = {
        'exact_version': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'offer': {'key': 'offer', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'exact_version': {'key': 'exactVersion', 'type': 'str'},
        'shared_gallery_image_id': {'key': 'sharedGalleryImageId', 'type': 'str'},
        'community_gallery_image_id': {'key': 'communityGalleryImageId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        publisher: Optional[str] = None,
        offer: Optional[str] = None,
        sku: Optional[str] = None,
        version: Optional[str] = None,
        shared_gallery_image_id: Optional[str] = None,
        community_gallery_image_id: Optional[str] = None,
        **kwargs
    ):
        super(ImageReference, self).__init__(id=id, **kwargs)
        self.publisher = publisher
        self.offer = offer
        self.sku = sku
        self.version = version
        # Readonly: the resolved version is only set by the server.
        self.exact_version = None
        self.shared_gallery_image_id = shared_gallery_image_id
        self.community_gallery_image_id = community_gallery_image_id
class ImageStorageProfile(msrest.serialization.Model):
    """Storage settings of an image: its OS disk and data disks.

    :param os_disk: OS disk of the image.
    :param data_disks: Data disks of the image.
    :param zone_resilient: Whether the image is zone resilient.
    """

    _attribute_map = {
        'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
        'zone_resilient': {'key': 'zoneResilient', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        os_disk: Optional["ImageOSDisk"] = None,
        data_disks: Optional[List["ImageDataDisk"]] = None,
        zone_resilient: Optional[bool] = None,
        **kwargs
    ):
        super(ImageStorageProfile, self).__init__(**kwargs)
        self.os_disk = os_disk
        self.data_disks = data_disks
        self.zone_resilient = zone_resilient
class ImageUpdate(UpdateResource):
    """PATCH-style update payload for an Image; ``provisioning_state`` is readonly.

    :param tags: Resource tags.
    :param source_virtual_machine: The VM the image was captured from.
    :param storage_profile: Storage settings of the image's disks.
    :param hyper_v_generation: Hyper-V generation (``HyperVGenerationTypes`` value or string).
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source_virtual_machine': {'key': 'properties.sourceVirtualMachine', 'type': 'SubResource'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'ImageStorageProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source_virtual_machine: Optional["SubResource"] = None,
        storage_profile: Optional["ImageStorageProfile"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        **kwargs
    ):
        super(ImageUpdate, self).__init__(tags=tags, **kwargs)
        self.source_virtual_machine = source_virtual_machine
        self.storage_profile = storage_profile
        # Readonly: filled in only when deserializing a server response.
        self.provisioning_state = None
        self.hyper_v_generation = hyper_v_generation
class InnerError(msrest.serialization.Model):
    """Inner error details attached to an API error.

    :param exceptiontype: Exception type name.
    :param errordetail: Internal error message or dump.
    """

    _attribute_map = {
        'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
        'errordetail': {'key': 'errordetail', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        exceptiontype: Optional[str] = None,
        errordetail: Optional[str] = None,
        **kwargs
    ):
        super(InnerError, self).__init__(**kwargs)
        self.exceptiontype = exceptiontype
        self.errordetail = errordetail
class InstanceViewStatus(msrest.serialization.Model):
    """A single instance-view status entry.

    :param code: Status code.
    :param level: Level of the status (``StatusLevelTypes`` value or string).
    :param display_status: Short localizable label for the status.
    :param message: Detailed status message.
    :param time: Time of the status (serialized as ISO-8601).
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'level': {'key': 'level', 'type': 'str'},
        'display_status': {'key': 'displayStatus', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        level: Optional[Union[str, "StatusLevelTypes"]] = None,
        display_status: Optional[str] = None,
        message: Optional[str] = None,
        time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        super(InstanceViewStatus, self).__init__(**kwargs)
        self.code = code
        self.level = level
        self.display_status = display_status
        self.message = message
        self.time = time
class KeyVaultKeyReference(msrest.serialization.Model):
    """Reference to a Key Vault key; both fields are required.

    :param key_url: Required. URL of the key in Key Vault.
    :param source_vault: Required. The Key Vault containing the key.
    """

    _validation = {
        'key_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'key_url': {'key': 'keyUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        key_url: str,
        source_vault: "SubResource",
        **kwargs
    ):
        super(KeyVaultKeyReference, self).__init__(**kwargs)
        self.key_url = key_url
        self.source_vault = source_vault
class KeyVaultSecretReference(msrest.serialization.Model):
    """Reference to a Key Vault secret; both fields are required.

    :param secret_url: Required. URL of the secret in Key Vault.
    :param source_vault: Required. The Key Vault containing the secret.
    """

    _validation = {
        'secret_url': {'required': True},
        'source_vault': {'required': True},
    }

    _attribute_map = {
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        secret_url: str,
        source_vault: "SubResource",
        **kwargs
    ):
        super(KeyVaultSecretReference, self).__init__(**kwargs)
        self.secret_url = secret_url
        self.source_vault = source_vault
class LastPatchInstallationSummary(msrest.serialization.Model):
    """Summary of the most recent patch installation operation.

    Every field is readonly: this model is only ever populated by
    deserializing a server response, so ``__init__`` takes no keyword
    parameters and sets all attributes to ``None``.
    """

    _validation = {
        'status': {'readonly': True},
        'installation_activity_id': {'readonly': True},
        'maintenance_window_exceeded': {'readonly': True},
        'not_selected_patch_count': {'readonly': True},
        'excluded_patch_count': {'readonly': True},
        'pending_patch_count': {'readonly': True},
        'installed_patch_count': {'readonly': True},
        'failed_patch_count': {'readonly': True},
        'start_time': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'installation_activity_id': {'key': 'installationActivityId', 'type': 'str'},
        'maintenance_window_exceeded': {'key': 'maintenanceWindowExceeded', 'type': 'bool'},
        'not_selected_patch_count': {'key': 'notSelectedPatchCount', 'type': 'int'},
        'excluded_patch_count': {'key': 'excludedPatchCount', 'type': 'int'},
        'pending_patch_count': {'key': 'pendingPatchCount', 'type': 'int'},
        'installed_patch_count': {'key': 'installedPatchCount', 'type': 'int'},
        'failed_patch_count': {'key': 'failedPatchCount', 'type': 'int'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LastPatchInstallationSummary, self).__init__(**kwargs)
        self.status = None
        self.installation_activity_id = None
        self.maintenance_window_exceeded = None
        self.not_selected_patch_count = None
        self.excluded_patch_count = None
        self.pending_patch_count = None
        self.installed_patch_count = None
        self.failed_patch_count = None
        self.start_time = None
        self.last_modified_time = None
        self.error = None
class LinuxConfiguration(msrest.serialization.Model):
    """Linux OS settings for a virtual machine.

    :param disable_password_authentication: Whether password authentication is disabled.
    :param ssh: SSH key configuration.
    :param provision_vm_agent: Whether the VM agent should be provisioned
        (wire key ``provisionVMAgent``).
    :param patch_settings: Guest-patching settings for Linux.
    """

    _attribute_map = {
        'disable_password_authentication': {'key': 'disablePasswordAuthentication', 'type': 'bool'},
        'ssh': {'key': 'ssh', 'type': 'SshConfiguration'},
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'patch_settings': {'key': 'patchSettings', 'type': 'LinuxPatchSettings'},
    }

    def __init__(
        self,
        *,
        disable_password_authentication: Optional[bool] = None,
        ssh: Optional["SshConfiguration"] = None,
        provision_vm_agent: Optional[bool] = None,
        patch_settings: Optional["LinuxPatchSettings"] = None,
        **kwargs
    ):
        super(LinuxConfiguration, self).__init__(**kwargs)
        self.disable_password_authentication = disable_password_authentication
        self.ssh = ssh
        self.provision_vm_agent = provision_vm_agent
        self.patch_settings = patch_settings
class LinuxParameters(msrest.serialization.Model):
    """Input parameters for Linux patch operations.

    :param classifications_to_include: Patch classifications to include
        (``VMGuestPatchClassificationLinux`` values or strings).
    :param package_name_masks_to_include: Package-name masks to include.
    :param package_name_masks_to_exclude: Package-name masks to exclude.
    :param maintenance_run_id: Id of the maintenance run.
    """

    _attribute_map = {
        'classifications_to_include': {'key': 'classificationsToInclude', 'type': '[str]'},
        'package_name_masks_to_include': {'key': 'packageNameMasksToInclude', 'type': '[str]'},
        'package_name_masks_to_exclude': {'key': 'packageNameMasksToExclude', 'type': '[str]'},
        'maintenance_run_id': {'key': 'maintenanceRunId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        classifications_to_include: Optional[List[Union[str, "VMGuestPatchClassificationLinux"]]] = None,
        package_name_masks_to_include: Optional[List[str]] = None,
        package_name_masks_to_exclude: Optional[List[str]] = None,
        maintenance_run_id: Optional[str] = None,
        **kwargs
    ):
        super(LinuxParameters, self).__init__(**kwargs)
        self.classifications_to_include = classifications_to_include
        self.package_name_masks_to_include = package_name_masks_to_include
        self.package_name_masks_to_exclude = package_name_masks_to_exclude
        self.maintenance_run_id = maintenance_run_id
class LinuxPatchSettings(msrest.serialization.Model):
    """Guest-patching settings for Linux VMs.

    :param patch_mode: Patch mode (``LinuxVMGuestPatchMode`` value or string).
    :param assessment_mode: Patch assessment mode (``LinuxPatchAssessmentMode`` value or string).
    :param automatic_by_platform_settings: Extra settings for the AutomaticByPlatform mode.
    """

    _attribute_map = {
        'patch_mode': {'key': 'patchMode', 'type': 'str'},
        'assessment_mode': {'key': 'assessmentMode', 'type': 'str'},
        'automatic_by_platform_settings': {'key': 'automaticByPlatformSettings', 'type': 'LinuxVMGuestPatchAutomaticByPlatformSettings'},
    }

    def __init__(
        self,
        *,
        patch_mode: Optional[Union[str, "LinuxVMGuestPatchMode"]] = None,
        assessment_mode: Optional[Union[str, "LinuxPatchAssessmentMode"]] = None,
        automatic_by_platform_settings: Optional["LinuxVMGuestPatchAutomaticByPlatformSettings"] = None,
        **kwargs
    ):
        super(LinuxPatchSettings, self).__init__(**kwargs)
        self.patch_mode = patch_mode
        self.assessment_mode = assessment_mode
        self.automatic_by_platform_settings = automatic_by_platform_settings
class LinuxVMGuestPatchAutomaticByPlatformSettings(msrest.serialization.Model):
    """Settings for the AutomaticByPlatform Linux patch mode.

    :param reboot_setting: Reboot behavior
        (``LinuxVMGuestPatchAutomaticByPlatformRebootSetting`` value or string).
    """

    _attribute_map = {
        'reboot_setting': {'key': 'rebootSetting', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        reboot_setting: Optional[Union[str, "LinuxVMGuestPatchAutomaticByPlatformRebootSetting"]] = None,
        **kwargs
    ):
        super(LinuxVMGuestPatchAutomaticByPlatformSettings, self).__init__(**kwargs)
        self.reboot_setting = reboot_setting
class ListUsagesResult(msrest.serialization.Model):
    """Paged list of Usage entries.

    :param value: Required. The current page of usage entries.
    :param next_link: Link to the next page of results, when present.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["Usage"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(ListUsagesResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class LogAnalyticsInputBase(msrest.serialization.Model):
    """Base input for LogAnalytics request-rate/throttle queries.

    :param blob_container_sas_uri: Required. SAS URI of the blob container to write output to.
    :param from_time: Required. Start of the query window (ISO-8601 on the wire).
    :param to_time: Required. End of the query window (ISO-8601 on the wire).
    :param group_by_throttle_policy: Group results by throttle policy.
    :param group_by_operation_name: Group results by operation name.
    :param group_by_resource_name: Group results by resource name.
    :param group_by_client_application_id: Group results by client application id.
    :param group_by_user_agent: Group results by user agent.
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }

    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'group_by_client_application_id': {'key': 'groupByClientApplicationId', 'type': 'bool'},
        'group_by_user_agent': {'key': 'groupByUserAgent', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        super(LogAnalyticsInputBase, self).__init__(**kwargs)
        self.blob_container_sas_uri = blob_container_sas_uri
        self.from_time = from_time
        self.to_time = to_time
        self.group_by_throttle_policy = group_by_throttle_policy
        self.group_by_operation_name = group_by_operation_name
        self.group_by_resource_name = group_by_resource_name
        self.group_by_client_application_id = group_by_client_application_id
        self.group_by_user_agent = group_by_user_agent
class LogAnalyticsOperationResult(msrest.serialization.Model):
    """Result of a LogAnalytics operation.

    ``properties`` is readonly: populated only when deserializing a server
    response, so ``__init__`` takes no keyword parameters.
    """

    _validation = {
        'properties': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'LogAnalyticsOutput'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LogAnalyticsOperationResult, self).__init__(**kwargs)
        self.properties = None
class LogAnalyticsOutput(msrest.serialization.Model):
    """Output of a LogAnalytics operation.

    ``output`` is readonly: populated only when deserializing a server
    response, so ``__init__`` takes no keyword parameters.
    """

    _validation = {
        'output': {'readonly': True},
    }

    _attribute_map = {
        'output': {'key': 'output', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LogAnalyticsOutput, self).__init__(**kwargs)
        self.output = None
class MaintenanceRedeployStatus(msrest.serialization.Model):
    """Maintenance-operation status of a virtual machine.

    :param is_customer_initiated_maintenance_allowed: Whether customer-initiated
        maintenance is allowed.
    :param pre_maintenance_window_start_time: Start of the pre-maintenance window.
    :param pre_maintenance_window_end_time: End of the pre-maintenance window.
    :param maintenance_window_start_time: Start of the maintenance window.
    :param maintenance_window_end_time: End of the maintenance window.
    :param last_operation_result_code: Result code of the last maintenance operation
        (``MaintenanceOperationResultCodeTypes`` value or string).
    :param last_operation_message: Message from the last maintenance operation.
    """

    _attribute_map = {
        'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
        'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
        'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
        'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
        'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
        'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'str'},
        'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        is_customer_initiated_maintenance_allowed: Optional[bool] = None,
        pre_maintenance_window_start_time: Optional[datetime.datetime] = None,
        pre_maintenance_window_end_time: Optional[datetime.datetime] = None,
        maintenance_window_start_time: Optional[datetime.datetime] = None,
        maintenance_window_end_time: Optional[datetime.datetime] = None,
        last_operation_result_code: Optional[Union[str, "MaintenanceOperationResultCodeTypes"]] = None,
        last_operation_message: Optional[str] = None,
        **kwargs
    ):
        super(MaintenanceRedeployStatus, self).__init__(**kwargs)
        self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
        self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
        self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
        self.maintenance_window_start_time = maintenance_window_start_time
        self.maintenance_window_end_time = maintenance_window_end_time
        self.last_operation_result_code = last_operation_result_code
        self.last_operation_message = last_operation_message
class ManagedDiskParameters(SubResource):
    """Managed disk parameters; extends SubResource with storage/encryption settings.

    :param id: Resource id of the managed disk.
    :param storage_account_type: Storage account type (``StorageAccountTypes`` value or string).
    :param disk_encryption_set: Disk encryption set reference.
    :param security_profile: Security profile of the managed disk.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'security_profile': {'key': 'securityProfile', 'type': 'VMDiskSecurityProfile'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        security_profile: Optional["VMDiskSecurityProfile"] = None,
        **kwargs
    ):
        super(ManagedDiskParameters, self).__init__(id=id, **kwargs)
        self.storage_account_type = storage_account_type
        self.disk_encryption_set = disk_encryption_set
        self.security_profile = security_profile
class NetworkInterfaceReference(SubResource):
    """Reference to a network interface attached to a VM.

    :param id: Resource id of the network interface.
    :param primary: Whether this is the primary NIC (wire key ``properties.primary``).
    :param delete_option: Behavior when the VM is deleted (``DeleteOptions`` value or string).
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ):
        super(NetworkInterfaceReference, self).__init__(id=id, **kwargs)
        self.primary = primary
        self.delete_option = delete_option
class NetworkProfile(msrest.serialization.Model):
    """Network settings for a virtual machine.

    :param network_interfaces: References to attached network interfaces.
    :param network_api_version: Network API version (``NetworkApiVersion`` value or string).
    :param network_interface_configurations: NIC configurations for the VM.
    """

    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterfaceReference]'},
        'network_api_version': {'key': 'networkApiVersion', 'type': 'str'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineNetworkInterfaceConfiguration]'},
    }

    def __init__(
        self,
        *,
        network_interfaces: Optional[List["NetworkInterfaceReference"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        network_interface_configurations: Optional[List["VirtualMachineNetworkInterfaceConfiguration"]] = None,
        **kwargs
    ):
        super(NetworkProfile, self).__init__(**kwargs)
        self.network_interfaces = network_interfaces
        self.network_api_version = network_api_version
        self.network_interface_configurations = network_interface_configurations
class OrchestrationServiceStateInput(msrest.serialization.Model):
    """Input for changing the state of an orchestration service; both fields required.

    :param service_name: Required. Name of the orchestration service
        (``OrchestrationServiceNames`` value or string).
    :param action: Required. Action to perform
        (``OrchestrationServiceStateAction`` value or string).
    """

    _validation = {
        'service_name': {'required': True},
        'action': {'required': True},
    }

    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        service_name: Union[str, "OrchestrationServiceNames"],
        action: Union[str, "OrchestrationServiceStateAction"],
        **kwargs
    ):
        super(OrchestrationServiceStateInput, self).__init__(**kwargs)
        self.service_name = service_name
        self.action = action
class OrchestrationServiceSummary(msrest.serialization.Model):
    """Summary of an orchestration service.

    Both fields are readonly: populated only when deserializing a server
    response, so ``__init__`` takes no keyword parameters.
    """

    _validation = {
        'service_name': {'readonly': True},
        'service_state': {'readonly': True},
    }

    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'service_state': {'key': 'serviceState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OrchestrationServiceSummary, self).__init__(**kwargs)
        self.service_name = None
        self.service_state = None
class OSDisk(msrest.serialization.Model):
    """Specifies information about the operating system disk used by the
    virtual machine.

    ``create_option`` is the only required parameter; everything else is an
    optional keyword argument. ``_attribute_map`` maps attributes to REST API
    JSON keys and msrest wire types.
    """
    _validation = {
        'create_option': {'required': True},
    }
    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': 'DiskEncryptionSettings'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        encryption_settings: Optional["DiskEncryptionSettings"] = None,
        name: Optional[str] = None,
        vhd: Optional["VirtualHardDisk"] = None,
        image: Optional["VirtualHardDisk"] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        super(OSDisk, self).__init__(**kwargs)
        self.os_type = os_type
        self.encryption_settings = encryption_settings
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.diff_disk_settings = diff_disk_settings
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.delete_option = delete_option
class OSDiskImage(msrest.serialization.Model):
    """Contains the OS disk image information; ``operating_system`` is
    required."""
    _validation = {
        'operating_system': {'required': True},
    }
    _attribute_map = {
        'operating_system': {'key': 'operatingSystem', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        operating_system: Union[str, "OperatingSystemTypes"],
        **kwargs
    ):
        super(OSDiskImage, self).__init__(**kwargs)
        self.operating_system = operating_system
class OSProfile(msrest.serialization.Model):
    """Specifies the operating system settings for the virtual machine
    (computer name, admin credentials, custom data, OS-specific configuration
    and secrets).

    All parameters are optional keyword arguments.
    """
    _attribute_map = {
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
        'allow_extension_operations': {'key': 'allowExtensionOperations', 'type': 'bool'},
        'require_guest_provision_signal': {'key': 'requireGuestProvisionSignal', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        computer_name: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        require_guest_provision_signal: Optional[bool] = None,
        **kwargs
    ):
        super(OSProfile, self).__init__(**kwargs)
        self.computer_name = computer_name
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
        self.allow_extension_operations = allow_extension_operations
        self.require_guest_provision_signal = require_guest_provision_signal
class PatchInstallationDetail(msrest.serialization.Model):
    """Information about a specific patch that was encountered during an
    installation action.

    Every field is ``readonly`` (server-populated): all attributes are
    initialized to ``None`` and only filled in during deserialization.
    """
    _validation = {
        'patch_id': {'readonly': True},
        'name': {'readonly': True},
        'version': {'readonly': True},
        'kb_id': {'readonly': True},
        'classifications': {'readonly': True},
        'installation_state': {'readonly': True},
    }
    _attribute_map = {
        'patch_id': {'key': 'patchId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'kb_id': {'key': 'kbId', 'type': 'str'},
        'classifications': {'key': 'classifications', 'type': '[str]'},
        'installation_state': {'key': 'installationState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(PatchInstallationDetail, self).__init__(**kwargs)
        self.patch_id = None
        self.name = None
        self.version = None
        self.kb_id = None
        self.classifications = None
        self.installation_state = None
class PatchSettings(msrest.serialization.Model):
    """Specifies settings related to VM guest patching on Windows.

    All parameters are optional keyword arguments.
    """
    _attribute_map = {
        'patch_mode': {'key': 'patchMode', 'type': 'str'},
        'enable_hotpatching': {'key': 'enableHotpatching', 'type': 'bool'},
        'assessment_mode': {'key': 'assessmentMode', 'type': 'str'},
        'automatic_by_platform_settings': {'key': 'automaticByPlatformSettings', 'type': 'WindowsVMGuestPatchAutomaticByPlatformSettings'},
    }
    def __init__(
        self,
        *,
        patch_mode: Optional[Union[str, "WindowsVMGuestPatchMode"]] = None,
        enable_hotpatching: Optional[bool] = None,
        assessment_mode: Optional[Union[str, "WindowsPatchAssessmentMode"]] = None,
        automatic_by_platform_settings: Optional["WindowsVMGuestPatchAutomaticByPlatformSettings"] = None,
        **kwargs
    ):
        super(PatchSettings, self).__init__(**kwargs)
        self.patch_mode = patch_mode
        self.enable_hotpatching = enable_hotpatching
        self.assessment_mode = assessment_mode
        self.automatic_by_platform_settings = automatic_by_platform_settings
class Plan(msrest.serialization.Model):
    """Specifies information about the marketplace image used to create the
    virtual machine (publisher, product/offer, plan name, promotion code).

    All parameters are optional keyword arguments.
    """
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
        'promotion_code': {'key': 'promotionCode', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        publisher: Optional[str] = None,
        product: Optional[str] = None,
        promotion_code: Optional[str] = None,
        **kwargs
    ):
        super(Plan, self).__init__(**kwargs)
        self.name = name
        self.publisher = publisher
        self.product = product
        self.promotion_code = promotion_code
class ProximityPlacementGroup(Resource):
    """Specifies information about the proximity placement group.

    ``location`` is required (inherited :class:`Resource` contract); the
    ``virtual_machines``, ``virtual_machine_scale_sets`` and
    ``availability_sets`` fields are ``readonly`` (server-populated) and
    initialized to ``None``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'virtual_machines': {'readonly': True},
        'virtual_machine_scale_sets': {'readonly': True},
        'availability_sets': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'proximity_placement_group_type': {'key': 'properties.proximityPlacementGroupType', 'type': 'str'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResourceWithColocationStatus]'},
        'virtual_machine_scale_sets': {'key': 'properties.virtualMachineScaleSets', 'type': '[SubResourceWithColocationStatus]'},
        'availability_sets': {'key': 'properties.availabilitySets', 'type': '[SubResourceWithColocationStatus]'},
        'colocation_status': {'key': 'properties.colocationStatus', 'type': 'InstanceViewStatus'},
        'intent': {'key': 'properties.intent', 'type': 'ProximityPlacementGroupPropertiesIntent'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        zones: Optional[List[str]] = None,
        proximity_placement_group_type: Optional[Union[str, "ProximityPlacementGroupType"]] = None,
        colocation_status: Optional["InstanceViewStatus"] = None,
        intent: Optional["ProximityPlacementGroupPropertiesIntent"] = None,
        **kwargs
    ):
        super(ProximityPlacementGroup, self).__init__(location=location, tags=tags, **kwargs)
        self.zones = zones
        self.proximity_placement_group_type = proximity_placement_group_type
        self.virtual_machines = None
        self.virtual_machine_scale_sets = None
        self.availability_sets = None
        self.colocation_status = colocation_status
        self.intent = intent
class ProximityPlacementGroupListResult(msrest.serialization.Model):
    """The List Proximity Placement Group operation response: a required
    ``value`` page plus an optional ``next_link`` for pagination."""
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ProximityPlacementGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["ProximityPlacementGroup"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(ProximityPlacementGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class ProximityPlacementGroupPropertiesIntent(msrest.serialization.Model):
    """Specifies the user intent of the proximity placement group as a list
    of VM size names."""
    _attribute_map = {
        'vm_sizes': {'key': 'vmSizes', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        vm_sizes: Optional[List[str]] = None,
        **kwargs
    ):
        super(ProximityPlacementGroupPropertiesIntent, self).__init__(**kwargs)
        self.vm_sizes = vm_sizes
class ProximityPlacementGroupUpdate(UpdateResource):
    """Specifies information about the proximity placement group update:
    only ``tags`` may be patched (delegated to :class:`UpdateResource`)."""
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(ProximityPlacementGroupUpdate, self).__init__(tags=tags, **kwargs)
class ProxyResource(msrest.serialization.Model):
    """Base model for an ARM proxy resource: it has ``id``, ``name`` and
    ``type`` but no location or tags.

    All three fields are ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(ProxyResource, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
class PublicIPAddressSku(msrest.serialization.Model):
    """Describes the public IP Sku (name and tier); both parameters are
    optional."""
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[Union[str, "PublicIPAddressSkuName"]] = None,
        tier: Optional[Union[str, "PublicIPAddressSkuTier"]] = None,
        **kwargs
    ):
        super(PublicIPAddressSku, self).__init__(**kwargs)
        self.name = name
        self.tier = tier
class PurchasePlan(msrest.serialization.Model):
    """Used for establishing the purchase context of any third-party
    artifact; ``publisher``, ``name`` and ``product`` are all required."""
    _validation = {
        'publisher': {'required': True},
        'name': {'required': True},
        'product': {'required': True},
    }
    _attribute_map = {
        'publisher': {'key': 'publisher', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        publisher: str,
        name: str,
        product: str,
        **kwargs
    ):
        super(PurchasePlan, self).__init__(**kwargs)
        self.publisher = publisher
        self.name = name
        self.product = product
class RecoveryWalkResponse(msrest.serialization.Model):
    """Response after calling a manual recovery walk.

    Both fields are ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'walk_performed': {'readonly': True},
        'next_platform_update_domain': {'readonly': True},
    }
    _attribute_map = {
        'walk_performed': {'key': 'walkPerformed', 'type': 'bool'},
        'next_platform_update_domain': {'key': 'nextPlatformUpdateDomain', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(RecoveryWalkResponse, self).__init__(**kwargs)
        self.walk_performed = None
        self.next_platform_update_domain = None
class RequestRateByIntervalInput(LogAnalyticsInputBase):
    """Api request input for LogAnalytics getRequestRateByInterval Api.

    Extends :class:`LogAnalyticsInputBase` with the required
    ``interval_length``; the remaining required/optional parameters are
    forwarded to the base class.
    """
    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
        'interval_length': {'required': True},
    }
    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'group_by_client_application_id': {'key': 'groupByClientApplicationId', 'type': 'bool'},
        'group_by_user_agent': {'key': 'groupByUserAgent', 'type': 'bool'},
        'interval_length': {'key': 'intervalLength', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        interval_length: Union[str, "IntervalInMins"],
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        super(RequestRateByIntervalInput, self).__init__(blob_container_sas_uri=blob_container_sas_uri, from_time=from_time, to_time=to_time, group_by_throttle_policy=group_by_throttle_policy, group_by_operation_name=group_by_operation_name, group_by_resource_name=group_by_resource_name, group_by_client_application_id=group_by_client_application_id, group_by_user_agent=group_by_user_agent, **kwargs)
        self.interval_length = interval_length
class ResourceWithOptionalLocation(msrest.serialization.Model):
    """The Resource model definition with location property as optional.

    ``id``, ``name`` and ``type`` are ``readonly`` (server-populated) and
    initialized to ``None``; ``location`` and ``tags`` may be set by the
    caller.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(ResourceWithOptionalLocation, self).__init__(**kwargs)
        self.location = location
        self.id = None
        self.name = None
        self.type = None
        self.tags = tags
class RestorePoint(ProxyResource):
    """Restore Point details.

    Extends :class:`ProxyResource`; ``source_metadata``,
    ``provisioning_state`` and ``instance_view`` are ``readonly``
    (server-populated) and initialized to ``None``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'source_metadata': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'exclude_disks': {'key': 'properties.excludeDisks', 'type': '[ApiEntityReference]'},
        'source_metadata': {'key': 'properties.sourceMetadata', 'type': 'RestorePointSourceMetadata'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'consistency_mode': {'key': 'properties.consistencyMode', 'type': 'str'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
        'source_restore_point': {'key': 'properties.sourceRestorePoint', 'type': 'ApiEntityReference'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'RestorePointInstanceView'},
    }
    def __init__(
        self,
        *,
        exclude_disks: Optional[List["ApiEntityReference"]] = None,
        consistency_mode: Optional[Union[str, "ConsistencyModeTypes"]] = None,
        time_created: Optional[datetime.datetime] = None,
        source_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        super(RestorePoint, self).__init__(**kwargs)
        self.exclude_disks = exclude_disks
        self.source_metadata = None
        self.provisioning_state = None
        self.consistency_mode = consistency_mode
        self.time_created = time_created
        self.source_restore_point = source_restore_point
        self.instance_view = None
class RestorePointCollection(Resource):
    """Create or update Restore Point collection parameters.

    ``location`` is required (inherited :class:`Resource` contract);
    ``provisioning_state``, ``restore_point_collection_id`` and
    ``restore_points`` are ``readonly`` (server-populated) and initialized
    to ``None``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'restore_point_collection_id': {'readonly': True},
        'restore_points': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'RestorePointCollectionSourceProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'restore_point_collection_id': {'key': 'properties.restorePointCollectionId', 'type': 'str'},
        'restore_points': {'key': 'properties.restorePoints', 'type': '[RestorePoint]'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["RestorePointCollectionSourceProperties"] = None,
        **kwargs
    ):
        super(RestorePointCollection, self).__init__(location=location, tags=tags, **kwargs)
        self.source = source
        self.provisioning_state = None
        self.restore_point_collection_id = None
        self.restore_points = None
class RestorePointCollectionListResult(msrest.serialization.Model):
    """The List restore point collection operation response: a page of
    collections plus an optional ``next_link`` for pagination."""
    _attribute_map = {
        'value': {'key': 'value', 'type': '[RestorePointCollection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: Optional[List["RestorePointCollection"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(RestorePointCollectionListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class RestorePointCollectionSourceProperties(msrest.serialization.Model):
    """The properties of the source resource that this restore point
    collection is created from.

    ``location`` is ``readonly`` (server-populated) and initialized to
    ``None``; only ``id`` may be set by the caller.
    """
    _validation = {
        'location': {'readonly': True},
    }
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(RestorePointCollectionSourceProperties, self).__init__(**kwargs)
        self.location = None
        self.id = id
class RestorePointCollectionUpdate(UpdateResource):
    """Update Restore Point collection parameters.

    ``provisioning_state``, ``restore_point_collection_id`` and
    ``restore_points`` are ``readonly`` (server-populated) and initialized
    to ``None``.
    """
    _validation = {
        'provisioning_state': {'readonly': True},
        'restore_point_collection_id': {'readonly': True},
        'restore_points': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'RestorePointCollectionSourceProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'restore_point_collection_id': {'key': 'properties.restorePointCollectionId', 'type': 'str'},
        'restore_points': {'key': 'properties.restorePoints', 'type': '[RestorePoint]'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["RestorePointCollectionSourceProperties"] = None,
        **kwargs
    ):
        super(RestorePointCollectionUpdate, self).__init__(tags=tags, **kwargs)
        self.source = source
        self.provisioning_state = None
        self.restore_point_collection_id = None
        self.restore_points = None
class RestorePointInstanceView(msrest.serialization.Model):
    """The instance view of a restore point: per-disk restore point views
    plus resource status information."""
    _attribute_map = {
        'disk_restore_points': {'key': 'diskRestorePoints', 'type': '[DiskRestorePointInstanceView]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        disk_restore_points: Optional[List["DiskRestorePointInstanceView"]] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(RestorePointInstanceView, self).__init__(**kwargs)
        self.disk_restore_points = disk_restore_points
        self.statuses = statuses
class RestorePointSourceMetadata(msrest.serialization.Model):
    """Describes the properties of the Virtual Machine for which the restore
    point was created (hardware/storage/OS/diagnostics profiles, license,
    VM id, security profile and location).

    All parameters are optional keyword arguments.
    """
    _attribute_map = {
        'hardware_profile': {'key': 'hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'RestorePointSourceVMStorageProfile'},
        'os_profile': {'key': 'osProfile', 'type': 'OSProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'vm_id': {'key': 'vmId', 'type': 'str'},
        'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
        'location': {'key': 'location', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["RestorePointSourceVMStorageProfile"] = None,
        os_profile: Optional["OSProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        license_type: Optional[str] = None,
        vm_id: Optional[str] = None,
        security_profile: Optional["SecurityProfile"] = None,
        location: Optional[str] = None,
        **kwargs
    ):
        super(RestorePointSourceMetadata, self).__init__(**kwargs)
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.os_profile = os_profile
        self.diagnostics_profile = diagnostics_profile
        self.license_type = license_type
        self.vm_id = vm_id
        self.security_profile = security_profile
        self.location = location
class RestorePointSourceVMDataDisk(msrest.serialization.Model):
    """Describes a data disk of the VM captured in a restore point.

    All parameters are optional keyword arguments.
    """
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'disk_restore_point': {'key': 'diskRestorePoint', 'type': 'ApiEntityReference'},
    }
    def __init__(
        self,
        *,
        lun: Optional[int] = None,
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        disk_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        super(RestorePointSourceVMDataDisk, self).__init__(**kwargs)
        self.lun = lun
        self.name = name
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.disk_restore_point = disk_restore_point
class RestorePointSourceVMOSDisk(msrest.serialization.Model):
    """Describes the OS disk of the VM captured in a restore point.

    All parameters are optional keyword arguments. NOTE(review): ``os_type``
    here uses ``"OperatingSystemType"`` while :class:`OSDisk` uses
    ``"OperatingSystemTypes"`` — both enum names come from the generated
    API surface; confirm against the models' enums module.
    """
    _attribute_map = {
        'os_type': {'key': 'osType', 'type': 'str'},
        'encryption_settings': {'key': 'encryptionSettings', 'type': 'DiskEncryptionSettings'},
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'ManagedDiskParameters'},
        'disk_restore_point': {'key': 'diskRestorePoint', 'type': 'ApiEntityReference'},
    }
    def __init__(
        self,
        *,
        os_type: Optional[Union[str, "OperatingSystemType"]] = None,
        encryption_settings: Optional["DiskEncryptionSettings"] = None,
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["ManagedDiskParameters"] = None,
        disk_restore_point: Optional["ApiEntityReference"] = None,
        **kwargs
    ):
        super(RestorePointSourceVMOSDisk, self).__init__(**kwargs)
        self.os_type = os_type
        self.encryption_settings = encryption_settings
        self.name = name
        self.caching = caching
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.disk_restore_point = disk_restore_point
class RestorePointSourceVMStorageProfile(msrest.serialization.Model):
    """Describes the storage profile (OS disk and data disks) of the VM
    captured in a restore point."""
    _attribute_map = {
        'os_disk': {'key': 'osDisk', 'type': 'RestorePointSourceVMOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[RestorePointSourceVMDataDisk]'},
    }
    def __init__(
        self,
        *,
        os_disk: Optional["RestorePointSourceVMOSDisk"] = None,
        data_disks: Optional[List["RestorePointSourceVMDataDisk"]] = None,
        **kwargs
    ):
        super(RestorePointSourceVMStorageProfile, self).__init__(**kwargs)
        self.os_disk = os_disk
        self.data_disks = data_disks
class RetrieveBootDiagnosticsDataResult(msrest.serialization.Model):
    """The SAS URIs of the console screenshot and serial log blobs.

    Both fields are ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'console_screenshot_blob_uri': {'readonly': True},
        'serial_console_log_blob_uri': {'readonly': True},
    }
    _attribute_map = {
        'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
        'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(RetrieveBootDiagnosticsDataResult, self).__init__(**kwargs)
        self.console_screenshot_blob_uri = None
        self.serial_console_log_blob_uri = None
class RollbackStatusInfo(msrest.serialization.Model):
    """Information about rollback on failed VM instances after an OS upgrade
    operation.

    Every field is ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'successfully_rolledback_instance_count': {'readonly': True},
        'failed_rolledback_instance_count': {'readonly': True},
        'rollback_error': {'readonly': True},
    }
    _attribute_map = {
        'successfully_rolledback_instance_count': {'key': 'successfullyRolledbackInstanceCount', 'type': 'int'},
        'failed_rolledback_instance_count': {'key': 'failedRolledbackInstanceCount', 'type': 'int'},
        'rollback_error': {'key': 'rollbackError', 'type': 'ApiError'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(RollbackStatusInfo, self).__init__(**kwargs)
        self.successfully_rolledback_instance_count = None
        self.failed_rolledback_instance_count = None
        self.rollback_error = None
class RollingUpgradePolicy(msrest.serialization.Model):
    """The configuration parameters used while performing a rolling upgrade.

    All parameters are optional keyword arguments; the batch/unhealthy
    percentages are range-checked by ``_validation`` at serialization time
    (batch and unhealthy: 5-100; unhealthy-upgraded: 0-100).
    """
    _validation = {
        'max_batch_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_instance_percent': {'maximum': 100, 'minimum': 5},
        'max_unhealthy_upgraded_instance_percent': {'maximum': 100, 'minimum': 0},
    }
    _attribute_map = {
        'max_batch_instance_percent': {'key': 'maxBatchInstancePercent', 'type': 'int'},
        'max_unhealthy_instance_percent': {'key': 'maxUnhealthyInstancePercent', 'type': 'int'},
        'max_unhealthy_upgraded_instance_percent': {'key': 'maxUnhealthyUpgradedInstancePercent', 'type': 'int'},
        'pause_time_between_batches': {'key': 'pauseTimeBetweenBatches', 'type': 'str'},
        'enable_cross_zone_upgrade': {'key': 'enableCrossZoneUpgrade', 'type': 'bool'},
        'prioritize_unhealthy_instances': {'key': 'prioritizeUnhealthyInstances', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        max_batch_instance_percent: Optional[int] = None,
        max_unhealthy_instance_percent: Optional[int] = None,
        max_unhealthy_upgraded_instance_percent: Optional[int] = None,
        pause_time_between_batches: Optional[str] = None,
        enable_cross_zone_upgrade: Optional[bool] = None,
        prioritize_unhealthy_instances: Optional[bool] = None,
        **kwargs
    ):
        super(RollingUpgradePolicy, self).__init__(**kwargs)
        self.max_batch_instance_percent = max_batch_instance_percent
        self.max_unhealthy_instance_percent = max_unhealthy_instance_percent
        self.max_unhealthy_upgraded_instance_percent = max_unhealthy_upgraded_instance_percent
        self.pause_time_between_batches = pause_time_between_batches
        self.enable_cross_zone_upgrade = enable_cross_zone_upgrade
        self.prioritize_unhealthy_instances = prioritize_unhealthy_instances
class RollingUpgradeProgressInfo(msrest.serialization.Model):
    """Information about the number of virtual machine instances in each
    upgrade state.

    Every field is ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'successful_instance_count': {'readonly': True},
        'failed_instance_count': {'readonly': True},
        'in_progress_instance_count': {'readonly': True},
        'pending_instance_count': {'readonly': True},
    }
    _attribute_map = {
        'successful_instance_count': {'key': 'successfulInstanceCount', 'type': 'int'},
        'failed_instance_count': {'key': 'failedInstanceCount', 'type': 'int'},
        'in_progress_instance_count': {'key': 'inProgressInstanceCount', 'type': 'int'},
        'pending_instance_count': {'key': 'pendingInstanceCount', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeProgressInfo, self).__init__(**kwargs)
        self.successful_instance_count = None
        self.failed_instance_count = None
        self.in_progress_instance_count = None
        self.pending_instance_count = None
class RollingUpgradeRunningStatus(msrest.serialization.Model):
    """Information about the current running state of the overall rolling
    upgrade.

    Every field is ``readonly`` (server-populated) and initialized to
    ``None``.
    """
    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'last_action': {'readonly': True},
        'last_action_time': {'readonly': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_action': {'key': 'lastAction', 'type': 'str'},
        'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(RollingUpgradeRunningStatus, self).__init__(**kwargs)
        self.code = None
        self.start_time = None
        self.last_action = None
        self.last_action_time = None
class RollingUpgradeStatusInfo(Resource):
    """The status of the latest virtual machine scale set rolling upgrade.

    ``location`` is required (inherited :class:`Resource` contract); the
    ``policy``, ``running_status``, ``progress`` and ``error`` fields are
    all ``readonly`` (server-populated) and initialized to ``None``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'policy': {'readonly': True},
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'policy': {'key': 'properties.policy', 'type': 'RollingUpgradePolicy'},
        'running_status': {'key': 'properties.runningStatus', 'type': 'RollingUpgradeRunningStatus'},
        'progress': {'key': 'properties.progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'properties.error', 'type': 'ApiError'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(RollingUpgradeStatusInfo, self).__init__(location=location, tags=tags, **kwargs)
        self.policy = None
        self.running_status = None
        self.progress = None
        self.error = None
class RunCommandDocumentBase(msrest.serialization.Model):
    """Describes the properties of a Run Command metadata document.

    All five parameters are required. Note the ``schema`` attribute
    serializes to the JSON key ``$schema``.
    """
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
    }
    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        **kwargs
    ):
        super(RunCommandDocumentBase, self).__init__(**kwargs)
        self.schema = schema
        self.id = id
        self.os_type = os_type
        self.label = label
        self.description = description
class RunCommandDocument(RunCommandDocumentBase):
    """Describes the properties of a Run Command, extending
    :class:`RunCommandDocumentBase` with the required ``script`` lines and
    optional ``parameters`` definitions."""
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
        'script': {'required': True},
    }
    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandParameterDefinition]'},
    }
    def __init__(
        self,
        *,
        schema: str,
        id: str,
        os_type: Union[str, "OperatingSystemTypes"],
        label: str,
        description: str,
        script: List[str],
        parameters: Optional[List["RunCommandParameterDefinition"]] = None,
        **kwargs
    ):
        super(RunCommandDocument, self).__init__(schema=schema, id=id, os_type=os_type, label=label, description=description, **kwargs)
        self.script = script
        self.parameters = parameters
class RunCommandInput(msrest.serialization.Model):
    """Capture Virtual Machine parameters for a run-command request:
    a required ``command_id`` plus optional ``script`` override and
    ``parameters``."""
    _validation = {
        'command_id': {'required': True},
    }
    _attribute_map = {
        'command_id': {'key': 'commandId', 'type': 'str'},
        'script': {'key': 'script', 'type': '[str]'},
        'parameters': {'key': 'parameters', 'type': '[RunCommandInputParameter]'},
    }
    def __init__(
        self,
        *,
        command_id: str,
        script: Optional[List[str]] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        **kwargs
    ):
        super(RunCommandInput, self).__init__(**kwargs)
        self.command_id = command_id
        self.script = script
        self.parameters = parameters
class RunCommandInputParameter(msrest.serialization.Model):
    """Describes the properties of a run command parameter: a required
    name/value pair."""
    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: str,
        value: str,
        **kwargs
    ):
        super(RunCommandInputParameter, self).__init__(**kwargs)
        self.name = name
        self.value = value
class RunCommandListResult(msrest.serialization.Model):
    """The List Virtual Machine operation response: a required page of run
    command documents plus an optional ``next_link`` for pagination."""
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[RunCommandDocumentBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["RunCommandDocumentBase"],
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(RunCommandListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class RunCommandParameterDefinition(msrest.serialization.Model):
    """Describes the properties of a run command parameter definition.

    ``name`` and ``type`` are required; ``required`` defaults to ``False``
    (an immutable default, so sharing across calls is safe).
    """
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'default_value': {'key': 'defaultValue', 'type': 'str'},
        'required': {'key': 'required', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        name: str,
        type: str,
        default_value: Optional[str] = None,
        required: Optional[bool] = False,
        **kwargs
    ):
        super(RunCommandParameterDefinition, self).__init__(**kwargs)
        self.name = name
        self.type = type
        self.default_value = default_value
        self.required = required
class RunCommandResult(msrest.serialization.Model):
    """Run command operation response: a list of instance-view status
    items."""
    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        value: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(RunCommandResult, self).__init__(**kwargs)
        self.value = value
class ScaleInPolicy(msrest.serialization.Model):
    """Describes a scale-in policy for a virtual machine scale set: the
    ordered ``rules`` to apply and whether to force-delete instances."""
    _attribute_map = {
        'rules': {'key': 'rules', 'type': '[str]'},
        'force_deletion': {'key': 'forceDeletion', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        rules: Optional[List[Union[str, "VirtualMachineScaleSetScaleInRules"]]] = None,
        force_deletion: Optional[bool] = None,
        **kwargs
    ):
        super(ScaleInPolicy, self).__init__(**kwargs)
        self.rules = rules
        self.force_deletion = force_deletion
class ScheduledEventsProfile(msrest.serialization.Model):
    """Wrapper holding the terminate-notification scheduled-events configuration."""

    _attribute_map = {
        'terminate_notification_profile': {'key': 'terminateNotificationProfile', 'type': 'TerminateNotificationProfile'},
    }

    def __init__(self, *, terminate_notification_profile: Optional["TerminateNotificationProfile"] = None, **kwargs):
        """:param terminate_notification_profile: Terminate-event notification settings."""
        super().__init__(**kwargs)
        self.terminate_notification_profile = terminate_notification_profile
class SecurityProfile(msrest.serialization.Model):
    """Security settings for a VM: UEFI settings, host-level encryption flag and
    the security type."""

    _attribute_map = {
        'uefi_settings': {'key': 'uefiSettings', 'type': 'UefiSettings'},
        'encryption_at_host': {'key': 'encryptionAtHost', 'type': 'bool'},
        'security_type': {'key': 'securityType', 'type': 'str'},
    }

    def __init__(self, *, uefi_settings: Optional["UefiSettings"] = None,
                 encryption_at_host: Optional[bool] = None,
                 security_type: Optional[Union[str, "SecurityTypes"]] = None, **kwargs):
        """
        :param uefi_settings: Secure-boot / vTPM configuration.
        :param encryption_at_host: Whether encryption at host is enabled.
        :param security_type: Security type (string or ``SecurityTypes`` value).
        """
        super().__init__(**kwargs)
        self.uefi_settings = uefi_settings
        self.encryption_at_host = encryption_at_host
        self.security_type = security_type
class Sku(msrest.serialization.Model):
    """Hardware SKU description: name, tier and capacity (serialized as a long)."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'long'},
    }

    def __init__(self, *, name: Optional[str] = None, tier: Optional[str] = None,
                 capacity: Optional[int] = None, **kwargs):
        """
        :param name: The SKU name.
        :param tier: The SKU tier.
        :param capacity: The number of instances in the SKU.
        """
        super().__init__(**kwargs)
        self.name = name
        self.tier = tier
        self.capacity = capacity
class SpotRestorePolicy(msrest.serialization.Model):
    """Spot-VM restore policy: enable flag plus a restore timeout string."""

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'restore_timeout': {'key': 'restoreTimeout', 'type': 'str'},
    }

    def __init__(self, *, enabled: Optional[bool] = None,
                 restore_timeout: Optional[str] = None, **kwargs):
        """
        :param enabled: Whether spot restore is enabled.
        :param restore_timeout: Timeout for the restore attempt.
        """
        super().__init__(**kwargs)
        self.enabled = enabled
        self.restore_timeout = restore_timeout
class SshConfiguration(msrest.serialization.Model):
    """SSH configuration: the list of public keys to place on the machine."""

    _attribute_map = {'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'}}

    def __init__(self, *, public_keys: Optional[List["SshPublicKey"]] = None, **kwargs):
        """:param public_keys: SSH public keys to install."""
        super().__init__(**kwargs)
        self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
    """A single SSH public key: the file path on the VM and the key material."""

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'key_data': {'key': 'keyData', 'type': 'str'},
    }

    def __init__(self, *, path: Optional[str] = None, key_data: Optional[str] = None, **kwargs):
        """
        :param path: Destination path of the key on the VM.
        :param key_data: The public key data.
        """
        super().__init__(**kwargs)
        self.path = path
        self.key_data = key_data
class SshPublicKeyGenerateKeyPairResult(msrest.serialization.Model):
    """Result of generating an SSH key pair: private key, public key and the
    resource id of the stored public key. All three fields are required."""

    _validation = {
        'private_key': {'required': True},
        'public_key': {'required': True},
        'id': {'required': True},
    }
    _attribute_map = {
        'private_key': {'key': 'privateKey', 'type': 'str'},
        'public_key': {'key': 'publicKey', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, *, private_key: str, public_key: str, id: str, **kwargs):
        """
        :param private_key: The generated private key. Required.
        :param public_key: The generated public key. Required.
        :param id: Resource id of the stored public key. Required.
        """
        super().__init__(**kwargs)
        self.private_key = private_key
        self.public_key = public_key
        self.id = id
class SshPublicKeyResource(Resource):
    """ARM resource wrapping an SSH public key.

    ``id``/``name``/``type`` are server-populated (read-only); ``location`` is
    required as for any tracked resource.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'public_key': {'key': 'properties.publicKey', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None,
                 public_key: Optional[str] = None, **kwargs):
        """
        :param location: Resource location. Required.
        :param tags: Resource tags.
        :param public_key: The SSH public key data.
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.public_key = public_key
class SshPublicKeysGroupListResult(msrest.serialization.Model):
    """One page of ``SshPublicKeyResource`` objects plus an optional next-page link."""

    _validation = {'value': {'required': True}}
    _attribute_map = {
        'value': {'key': 'value', 'type': '[SshPublicKeyResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["SshPublicKeyResource"], next_link: Optional[str] = None, **kwargs):
        """
        :param value: The current page of SSH public key resources. Required.
        :param next_link: URI to fetch the next page, if any.
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SshPublicKeyUpdateResource(UpdateResource):
    """PATCH payload for an SSH public key resource: tags plus the key data."""

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'public_key': {'key': 'properties.publicKey', 'type': 'str'},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None,
                 public_key: Optional[str] = None, **kwargs):
        """
        :param tags: Resource tags.
        :param public_key: The SSH public key data.
        """
        super().__init__(tags=tags, **kwargs)
        self.public_key = public_key
class StorageProfile(msrest.serialization.Model):
    """VM storage settings: image reference, OS disk and data disks."""

    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(self, *, image_reference: Optional["ImageReference"] = None,
                 os_disk: Optional["OSDisk"] = None,
                 data_disks: Optional[List["DataDisk"]] = None, **kwargs):
        """
        :param image_reference: Source image for the VM.
        :param os_disk: OS disk settings.
        :param data_disks: Data disk settings.
        """
        super().__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class SubResourceReadOnly(msrest.serialization.Model):
    """Sub-resource reference whose ``id`` is server-populated and read-only
    (always initialized to None; callers cannot set it)."""

    _validation = {'id': {'readonly': True}}
    _attribute_map = {'id': {'key': 'id', 'type': 'str'}}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only: filled in by the service on deserialization.
        self.id = None
class SubResourceWithColocationStatus(SubResource):
    """Sub-resource reference augmented with a colocation status."""

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'colocation_status': {'key': 'colocationStatus', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, *, id: Optional[str] = None,
                 colocation_status: Optional["InstanceViewStatus"] = None, **kwargs):
        """
        :param id: Resource id.
        :param colocation_status: Colocation status of the sub-resource.
        """
        super().__init__(id=id, **kwargs)
        self.colocation_status = colocation_status
class TerminateNotificationProfile(msrest.serialization.Model):
    """Terminate-notification settings: enable flag and not-before timeout."""

    _attribute_map = {
        'not_before_timeout': {'key': 'notBeforeTimeout', 'type': 'str'},
        'enable': {'key': 'enable', 'type': 'bool'},
    }

    def __init__(self, *, not_before_timeout: Optional[str] = None,
                 enable: Optional[bool] = None, **kwargs):
        """
        :param not_before_timeout: How long the event is delayed before the VM is deleted.
        :param enable: Whether the Terminate scheduled event is enabled.
        """
        super().__init__(**kwargs)
        self.not_before_timeout = not_before_timeout
        self.enable = enable
class ThrottledRequestsInput(LogAnalyticsInputBase):
    """Request payload for a throttled-requests LogAnalytics query.

    This model adds no fields of its own: every keyword argument is forwarded
    verbatim to ``LogAnalyticsInputBase``. ``blob_container_sas_uri``,
    ``from_time`` and ``to_time`` are required; the ``group_by_*`` flags are
    optional aggregation switches.
    """

    _validation = {
        'blob_container_sas_uri': {'required': True},
        'from_time': {'required': True},
        'to_time': {'required': True},
    }
    _attribute_map = {
        'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
        'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
        'to_time': {'key': 'toTime', 'type': 'iso-8601'},
        'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
        'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
        'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
        'group_by_client_application_id': {'key': 'groupByClientApplicationId', 'type': 'bool'},
        'group_by_user_agent': {'key': 'groupByUserAgent', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        blob_container_sas_uri: str,
        from_time: datetime.datetime,
        to_time: datetime.datetime,
        group_by_throttle_policy: Optional[bool] = None,
        group_by_operation_name: Optional[bool] = None,
        group_by_resource_name: Optional[bool] = None,
        group_by_client_application_id: Optional[bool] = None,
        group_by_user_agent: Optional[bool] = None,
        **kwargs
    ):
        """
        :param blob_container_sas_uri: SAS URI of the output blob container. Required.
        :param from_time: Start of the query interval (serialized as ISO-8601). Required.
        :param to_time: End of the query interval (serialized as ISO-8601). Required.
        :param group_by_throttle_policy: Group results by throttle policy.
        :param group_by_operation_name: Group results by operation name.
        :param group_by_resource_name: Group results by resource name.
        :param group_by_client_application_id: Group results by client application id.
        :param group_by_user_agent: Group results by user agent.
        """
        # All attributes are assigned by the base class; nothing extra stored here.
        super(ThrottledRequestsInput, self).__init__(blob_container_sas_uri=blob_container_sas_uri, from_time=from_time, to_time=to_time, group_by_throttle_policy=group_by_throttle_policy, group_by_operation_name=group_by_operation_name, group_by_resource_name=group_by_resource_name, group_by_client_application_id=group_by_client_application_id, group_by_user_agent=group_by_user_agent, **kwargs)
class UefiSettings(msrest.serialization.Model):
    """UEFI security settings: secure boot and virtual TPM flags."""

    _attribute_map = {
        'secure_boot_enabled': {'key': 'secureBootEnabled', 'type': 'bool'},
        'v_tpm_enabled': {'key': 'vTpmEnabled', 'type': 'bool'},
    }

    def __init__(self, *, secure_boot_enabled: Optional[bool] = None,
                 v_tpm_enabled: Optional[bool] = None, **kwargs):
        """
        :param secure_boot_enabled: Whether secure boot is enabled.
        :param v_tpm_enabled: Whether the virtual TPM is enabled.
        """
        super().__init__(**kwargs)
        self.secure_boot_enabled = secure_boot_enabled
        self.v_tpm_enabled = v_tpm_enabled
class UpgradeOperationHistoricalStatusInfo(msrest.serialization.Model):
    """Historical OS-upgrade status entry. Every field is server-populated
    (read-only) and therefore initialized to None."""

    _validation = {
        'properties': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
    }
    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'UpgradeOperationHistoricalStatusInfoProperties'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.properties = None
        self.type = None
        self.location = None
class UpgradeOperationHistoricalStatusInfoProperties(msrest.serialization.Model):
    """Detail payload of one historical upgrade operation. All six fields are
    server-populated (read-only) and initialized to None."""

    _validation = {
        'running_status': {'readonly': True},
        'progress': {'readonly': True},
        'error': {'readonly': True},
        'started_by': {'readonly': True},
        'target_image_reference': {'readonly': True},
        'rollback_info': {'readonly': True},
    }
    _attribute_map = {
        'running_status': {'key': 'runningStatus', 'type': 'UpgradeOperationHistoryStatus'},
        'progress': {'key': 'progress', 'type': 'RollingUpgradeProgressInfo'},
        'error': {'key': 'error', 'type': 'ApiError'},
        'started_by': {'key': 'startedBy', 'type': 'str'},
        'target_image_reference': {'key': 'targetImageReference', 'type': 'ImageReference'},
        'rollback_info': {'key': 'rollbackInfo', 'type': 'RollbackStatusInfo'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.running_status = None
        self.progress = None
        self.error = None
        self.started_by = None
        self.target_image_reference = None
        self.rollback_info = None
class UpgradeOperationHistoryStatus(msrest.serialization.Model):
    """Run status of an upgrade operation (code plus start/end timestamps).
    All fields are server-populated (read-only)."""

    _validation = {
        'code': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.code = None
        self.start_time = None
        self.end_time = None
class UpgradePolicy(msrest.serialization.Model):
    """Upgrade policy for a scale set: mode plus rolling-upgrade and automatic
    OS-upgrade sub-policies."""

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'rolling_upgrade_policy': {'key': 'rollingUpgradePolicy', 'type': 'RollingUpgradePolicy'},
        'automatic_os_upgrade_policy': {'key': 'automaticOSUpgradePolicy', 'type': 'AutomaticOSUpgradePolicy'},
    }

    def __init__(self, *, mode: Optional[Union[str, "UpgradeMode"]] = None,
                 rolling_upgrade_policy: Optional["RollingUpgradePolicy"] = None,
                 automatic_os_upgrade_policy: Optional["AutomaticOSUpgradePolicy"] = None, **kwargs):
        """
        :param mode: Upgrade mode (string or ``UpgradeMode`` value).
        :param rolling_upgrade_policy: Rolling-upgrade configuration.
        :param automatic_os_upgrade_policy: Automatic OS-upgrade configuration.
        """
        super().__init__(**kwargs)
        self.mode = mode
        self.rolling_upgrade_policy = rolling_upgrade_policy
        self.automatic_os_upgrade_policy = automatic_os_upgrade_policy
class Usage(msrest.serialization.Model):
    """A usage metric: current value, limit and name.

    ``unit`` is declared constant in ``_validation`` and fixed to ``"Count"``
    at class level, so it is not a constructor parameter.
    """

    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # Constant attribute required by the wire format.
    unit = "Count"

    def __init__(self, *, current_value: int, limit: int, name: "UsageName", **kwargs):
        """
        :param current_value: Current usage. Required.
        :param limit: Maximum permitted usage. Required.
        :param name: Name of the usage counter. Required.
        """
        super().__init__(**kwargs)
        self.current_value = current_value
        self.limit = limit
        self.name = name
class UsageName(msrest.serialization.Model):
    """Usage-counter name: a raw value plus a localized display value."""

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[str] = None,
                 localized_value: Optional[str] = None, **kwargs):
        """
        :param value: The counter name.
        :param localized_value: The localized display name.
        """
        super().__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class UserAssignedIdentitiesValue(msrest.serialization.Model):
    """Principal/client id pair for a user-assigned identity. Both fields are
    server-populated (read-only) and initialized to None."""

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }
    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.principal_id = None
        self.client_id = None
class VaultCertificate(msrest.serialization.Model):
    """Reference to a Key Vault certificate: its URL and the target store name."""

    _attribute_map = {
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
        'certificate_store': {'key': 'certificateStore', 'type': 'str'},
    }

    def __init__(self, *, certificate_url: Optional[str] = None,
                 certificate_store: Optional[str] = None, **kwargs):
        """
        :param certificate_url: URL of the certificate in Key Vault.
        :param certificate_store: Certificate store the certificate is placed in.
        """
        super().__init__(**kwargs)
        self.certificate_url = certificate_url
        self.certificate_store = certificate_store
class VaultSecretGroup(msrest.serialization.Model):
    """A source Key Vault together with the certificates to pull from it."""

    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
        'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
    }

    def __init__(self, *, source_vault: Optional["SubResource"] = None,
                 vault_certificates: Optional[List["VaultCertificate"]] = None, **kwargs):
        """
        :param source_vault: The Key Vault containing the certificates.
        :param vault_certificates: Certificates to retrieve from the vault.
        """
        super().__init__(**kwargs)
        self.source_vault = source_vault
        self.vault_certificates = vault_certificates
class VirtualHardDisk(msrest.serialization.Model):
    """A virtual hard disk, identified solely by its URI."""

    _attribute_map = {'uri': {'key': 'uri', 'type': 'str'}}

    def __init__(self, *, uri: Optional[str] = None, **kwargs):
        """:param uri: The virtual hard disk's URI."""
        super().__init__(**kwargs)
        self.uri = uri
class VirtualMachine(Resource):
    """Describes a Virtual Machine ARM resource.

    ``_validation`` marks the server-populated, read-only fields
    (``id``/``name``/``type`` from the base resource, plus ``resources``,
    ``provisioning_state``, ``instance_view``, ``vm_id`` and
    ``time_created``); those are initialized to ``None`` below and cannot be
    set by callers. ``_attribute_map`` maps each attribute to its wire key —
    ``properties.*`` keys are nested under the resource's ``properties``
    envelope when serialized.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'resources': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'vm_id': {'readonly': True},
        'time_created': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'priority': {'key': 'properties.priority', 'type': 'str'},
        'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
        'host': {'key': 'properties.host', 'type': 'SubResource'},
        'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'extensions_time_budget': {'key': 'properties.extensionsTimeBudget', 'type': 'str'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'scheduled_events_profile': {'key': 'properties.scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
        'user_data': {'key': 'properties.userData', 'type': 'str'},
        'capacity_reservation': {'key': 'properties.capacityReservation', 'type': 'CapacityReservationProfile'},
        'application_profile': {'key': 'properties.applicationProfile', 'type': 'ApplicationProfile'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        host_group: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        extensions_time_budget: Optional[str] = None,
        platform_fault_domain: Optional[int] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        capacity_reservation: Optional["CapacityReservationProfile"] = None,
        application_profile: Optional["ApplicationProfile"] = None,
        **kwargs
    ):
        """
        :param location: Resource location. Required; everything else is optional.

        The remaining keyword arguments are the VM's configuration profiles and
        settings, stored 1:1 on attributes of the same name (see
        ``_attribute_map`` for their wire keys and types).
        """
        # location/tags are handled by the Resource base class.
        super(VirtualMachine, self).__init__(location=location, tags=tags, **kwargs)
        self.plan = plan
        # Read-only: extension resources attached to the VM, populated by the service.
        self.resources = None
        self.identity = identity
        self.zones = zones
        self.extended_location = extended_location
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.host_group = host_group
        # Read-only: populated by the service.
        self.provisioning_state = None
        self.instance_view = None
        self.license_type = license_type
        # Read-only: populated by the service.
        self.vm_id = None
        self.extensions_time_budget = extensions_time_budget
        self.platform_fault_domain = platform_fault_domain
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
        self.capacity_reservation = capacity_reservation
        self.application_profile = application_profile
        # Read-only: populated by the service.
        self.time_created = None
class VirtualMachineAgentInstanceView(msrest.serialization.Model):
    """Instance view of the VM agent: version, extension handlers and statuses."""

    _attribute_map = {
        'vm_agent_version': {'key': 'vmAgentVersion', 'type': 'str'},
        'extension_handlers': {'key': 'extensionHandlers', 'type': '[VirtualMachineExtensionHandlerInstanceView]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, vm_agent_version: Optional[str] = None,
                 extension_handlers: Optional[List["VirtualMachineExtensionHandlerInstanceView"]] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :param vm_agent_version: The VM agent's full version string.
        :param extension_handlers: Instance views of the extension handlers.
        :param statuses: Resource status information.
        """
        super().__init__(**kwargs)
        self.vm_agent_version = vm_agent_version
        self.extension_handlers = extension_handlers
        self.statuses = statuses
class VirtualMachineAssessPatchesResult(msrest.serialization.Model):
    """Result of a VM patch assessment. All fields are server-populated
    (read-only) and initialized to None."""

    _validation = {
        'status': {'readonly': True},
        'assessment_activity_id': {'readonly': True},
        'reboot_pending': {'readonly': True},
        'critical_and_security_patch_count': {'readonly': True},
        'other_patch_count': {'readonly': True},
        'start_date_time': {'readonly': True},
        'available_patches': {'readonly': True},
        'error': {'readonly': True},
    }
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'},
        'reboot_pending': {'key': 'rebootPending', 'type': 'bool'},
        'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'},
        'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'},
        'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
        'available_patches': {'key': 'availablePatches', 'type': '[VirtualMachineSoftwarePatchProperties]'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.status = None
        self.assessment_activity_id = None
        self.reboot_pending = None
        self.critical_and_security_patch_count = None
        self.other_patch_count = None
        self.start_date_time = None
        self.available_patches = None
        self.error = None
class VirtualMachineCaptureParameters(msrest.serialization.Model):
    """Parameters for capturing a VM image: VHD name prefix, destination
    container and overwrite flag. All three are required."""

    _validation = {
        'vhd_prefix': {'required': True},
        'destination_container_name': {'required': True},
        'overwrite_vhds': {'required': True},
    }
    _attribute_map = {
        'vhd_prefix': {'key': 'vhdPrefix', 'type': 'str'},
        'destination_container_name': {'key': 'destinationContainerName', 'type': 'str'},
        'overwrite_vhds': {'key': 'overwriteVhds', 'type': 'bool'},
    }

    def __init__(self, *, vhd_prefix: str, destination_container_name: str,
                 overwrite_vhds: bool, **kwargs):
        """
        :param vhd_prefix: Prefix for captured VHD names. Required.
        :param destination_container_name: Destination blob container. Required.
        :param overwrite_vhds: Whether existing VHDs may be overwritten. Required.
        """
        super().__init__(**kwargs)
        self.vhd_prefix = vhd_prefix
        self.destination_container_name = destination_container_name
        self.overwrite_vhds = overwrite_vhds
class VirtualMachineCaptureResult(SubResource):
    """Output of a VM capture operation: an ARM template fragment.

    Apart from the inherited ``id``, every field is server-populated
    (read-only) and initialized to None. Note the ``$schema`` wire key for the
    ``schema`` attribute.
    """

    _validation = {
        'schema': {'readonly': True},
        'content_version': {'readonly': True},
        'parameters': {'readonly': True},
        'resources': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema': {'key': '$schema', 'type': 'str'},
        'content_version': {'key': 'contentVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '[object]'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):
        """:param id: Resource id."""
        super().__init__(id=id, **kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.schema = None
        self.content_version = None
        self.parameters = None
        self.resources = None
class VirtualMachineExtension(ResourceWithOptionalLocation):
    """Describes a Virtual Machine extension resource.

    ``id``/``name``/``type`` and ``provisioning_state`` are server-populated
    (read-only). The extension's own type is exposed as
    ``type_properties_type`` to avoid clashing with the ARM resource ``type``;
    its wire key is the nested ``properties.type``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        All keyword arguments are optional and stored 1:1 on attributes of the
        same name (see ``_attribute_map`` for wire keys). ``location``/``tags``
        are forwarded to the base class; ``provisioning_state`` is read-only
        and set to ``None``.
        """
        super(VirtualMachineExtension, self).__init__(location=location, tags=tags, **kwargs)
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        # Read-only: populated by the service.
        self.provisioning_state = None
        self.instance_view = instance_view
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineExtensionHandlerInstanceView(msrest.serialization.Model):
    """Instance view of one extension handler: type, handler version and status."""

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }

    def __init__(self, *, type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 status: Optional["InstanceViewStatus"] = None, **kwargs):
        """
        :param type: Full extension type (namespace/class).
        :param type_handler_version: Version of the script handler.
        :param status: Extension handler status.
        """
        super().__init__(**kwargs)
        self.type = type
        self.type_handler_version = type_handler_version
        self.status = status
class VirtualMachineExtensionImage(Resource):
    """Describes a Virtual Machine extension image resource.

    ``id``/``name``/``type`` are server-populated (read-only); ``location`` is
    required. The extension-specific settings live under ``properties.*`` keys.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'operating_system': {'key': 'properties.operatingSystem', 'type': 'str'},
        'compute_role': {'key': 'properties.computeRole', 'type': 'str'},
        'handler_schema': {'key': 'properties.handlerSchema', 'type': 'str'},
        'vm_scale_set_enabled': {'key': 'properties.vmScaleSetEnabled', 'type': 'bool'},
        'supports_multiple_extensions': {'key': 'properties.supportsMultipleExtensions', 'type': 'bool'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None,
                 operating_system: Optional[str] = None,
                 compute_role: Optional[str] = None,
                 handler_schema: Optional[str] = None,
                 vm_scale_set_enabled: Optional[bool] = None,
                 supports_multiple_extensions: Optional[bool] = None, **kwargs):
        """
        :param location: Resource location. Required.
        :param tags: Resource tags.
        :param operating_system: Operating system the extension supports.
        :param compute_role: Compute role the extension supports.
        :param handler_schema: Schema the extension handler accepts.
        :param vm_scale_set_enabled: Whether the extension works on scale sets.
        :param supports_multiple_extensions: Whether the handler supports multiple extensions.
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.operating_system = operating_system
        self.compute_role = compute_role
        self.handler_schema = handler_schema
        self.vm_scale_set_enabled = vm_scale_set_enabled
        self.supports_multiple_extensions = supports_multiple_extensions
class VirtualMachineExtensionInstanceView(msrest.serialization.Model):
    """Instance view of a VM extension: identity fields plus status lists."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'type_handler_version': {'key': 'typeHandlerVersion', 'type': 'str'},
        'substatuses': {'key': 'substatuses', 'type': '[InstanceViewStatus]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }

    def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None,
                 type_handler_version: Optional[str] = None,
                 substatuses: Optional[List["InstanceViewStatus"]] = None,
                 statuses: Optional[List["InstanceViewStatus"]] = None, **kwargs):
        """
        :param name: The extension name.
        :param type: Full extension type (namespace/class).
        :param type_handler_version: Version of the script handler.
        :param substatuses: Resource sub-status information.
        :param statuses: Resource status information.
        """
        super().__init__(**kwargs)
        self.name = name
        self.type = type
        self.type_handler_version = type_handler_version
        self.substatuses = substatuses
        self.statuses = statuses
class VirtualMachineExtensionsListResult(msrest.serialization.Model):
    """List-operation response holding VM extension resources."""

    _attribute_map = {'value': {'key': 'value', 'type': '[VirtualMachineExtension]'}}

    def __init__(self, *, value: Optional[List["VirtualMachineExtension"]] = None, **kwargs):
        """:param value: The list of extensions."""
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineExtensionUpdate(UpdateResource):
    """PATCH payload for a Virtual Machine extension.

    Mirrors the writable fields of ``VirtualMachineExtension`` (here the
    extension type is simply ``type`` with wire key ``properties.type``, since
    there is no ARM ``type`` field to clash with); ``tags`` is forwarded to
    ``UpdateResource``.
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """
        All keyword arguments are optional and stored 1:1 on attributes of the
        same name (see ``_attribute_map`` for wire keys).
        """
        super(VirtualMachineExtensionUpdate, self).__init__(tags=tags, **kwargs)
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type = type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineHealthStatus(msrest.serialization.Model):
    """VM health status. The single ``status`` field is server-populated
    (read-only) and initialized to None."""

    _validation = {'status': {'readonly': True}}
    _attribute_map = {'status': {'key': 'status', 'type': 'InstanceViewStatus'}}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only: filled in by the service on deserialization.
        self.status = None
class VirtualMachineIdentity(msrest.serialization.Model):
    """Managed identity of a VM.

    ``principal_id`` and ``tenant_id`` are server-populated (read-only);
    callers choose the identity ``type`` and any user-assigned identities.
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }
    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentitiesValue}'},
    }

    def __init__(self, *, type: Optional[Union[str, "ResourceIdentityType"]] = None,
                 user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None, **kwargs):
        """
        :param type: Identity type (string or ``ResourceIdentityType`` value).
        :param user_assigned_identities: User-assigned identities keyed by resource id.
        """
        super().__init__(**kwargs)
        # Read-only fields: filled in by the service on deserialization.
        self.principal_id = None
        self.tenant_id = None
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineImageResource(SubResource):
    """VM image resource reference. Unlike the base ``SubResource``, both
    ``name`` and ``location`` are required here."""

    _validation = {
        'name': {'required': True},
        'location': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
    }

    def __init__(self, *, name: str, location: str, id: Optional[str] = None,
                 tags: Optional[Dict[str, str]] = None,
                 extended_location: Optional["ExtendedLocation"] = None, **kwargs):
        """
        :param name: The resource name. Required.
        :param location: The supported Azure location. Required.
        :param id: Resource id.
        :param tags: Resource tags.
        :param extended_location: The extended location of the resource.
        """
        # Only `id` is handled by the SubResource base; the rest is stored here.
        super().__init__(id=id, **kwargs)
        self.name = name
        self.location = location
        self.tags = tags
        self.extended_location = extended_location
class VirtualMachineImage(VirtualMachineImageResource):
    """Describes a virtual machine image.

    Extends :class:`VirtualMachineImageResource` with the image-specific
    properties (serialized under the ``properties.*`` JSON keys).

    :param name: Required. The name of the resource.
    :param location: Required. The supported Azure location of the resource.
    :param id: Resource id.
    :param tags: Resource tags as a string-to-string mapping.
    :param extended_location: The extended location of the resource.
    :param plan: Purchase plan information (``PurchasePlan``).
    :param os_disk_image: OS disk image information (``OSDiskImage``).
    :param data_disk_images: List of ``DataDiskImage`` entries.
    :param automatic_os_upgrade_properties: ``AutomaticOSUpgradeProperties``.
    :param hyper_v_generation: Hyper-V generation, as a string or
        ``HyperVGenerationTypes`` value.
    :param disallowed: ``DisallowedConfiguration`` for the image.
    :param features: List of ``VirtualMachineImageFeature`` entries.
    :param architecture: Architecture, as a string or ``ArchitectureTypes``
        value.
    """
    _validation = {
        'name': {'required': True},
        'location': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
        'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
        'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
        'automatic_os_upgrade_properties': {'key': 'properties.automaticOSUpgradeProperties', 'type': 'AutomaticOSUpgradeProperties'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
        'disallowed': {'key': 'properties.disallowed', 'type': 'DisallowedConfiguration'},
        'features': {'key': 'properties.features', 'type': '[VirtualMachineImageFeature]'},
        'architecture': {'key': 'properties.architecture', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: str,
        location: str,
        id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        plan: Optional["PurchasePlan"] = None,
        os_disk_image: Optional["OSDiskImage"] = None,
        data_disk_images: Optional[List["DataDiskImage"]] = None,
        automatic_os_upgrade_properties: Optional["AutomaticOSUpgradeProperties"] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationTypes"]] = None,
        disallowed: Optional["DisallowedConfiguration"] = None,
        features: Optional[List["VirtualMachineImageFeature"]] = None,
        architecture: Optional[Union[str, "ArchitectureTypes"]] = None,
        **kwargs
    ):
        super(VirtualMachineImage, self).__init__(id=id, name=name, location=location, tags=tags, extended_location=extended_location, **kwargs)
        self.plan = plan
        self.os_disk_image = os_disk_image
        self.data_disk_images = data_disk_images
        self.automatic_os_upgrade_properties = automatic_os_upgrade_properties
        self.hyper_v_generation = hyper_v_generation
        self.disallowed = disallowed
        self.features = features
        self.architecture = architecture
class VirtualMachineImageFeature(msrest.serialization.Model):
    """A name/value pair describing one feature of a virtual machine image."""

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, value: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class VirtualMachineInstallPatchesParameters(msrest.serialization.Model):
    """Input parameters for an InstallPatches operation on a virtual machine.

    :param reboot_setting: Required. Reboot behaviour, as a string or a
        ``VMGuestPatchRebootSetting`` value.
    :param maximum_duration: Maximum duration for the operation, as a string.
    :param windows_parameters: Windows-specific options
        (``WindowsParameters``).
    :param linux_parameters: Linux-specific options (``LinuxParameters``).
    """
    _validation = {
        'reboot_setting': {'required': True},
    }
    _attribute_map = {
        'maximum_duration': {'key': 'maximumDuration', 'type': 'str'},
        'reboot_setting': {'key': 'rebootSetting', 'type': 'str'},
        'windows_parameters': {'key': 'windowsParameters', 'type': 'WindowsParameters'},
        'linux_parameters': {'key': 'linuxParameters', 'type': 'LinuxParameters'},
    }
    def __init__(
        self,
        *,
        reboot_setting: Union[str, "VMGuestPatchRebootSetting"],
        maximum_duration: Optional[str] = None,
        windows_parameters: Optional["WindowsParameters"] = None,
        linux_parameters: Optional["LinuxParameters"] = None,
        **kwargs
    ):
        super(VirtualMachineInstallPatchesParameters, self).__init__(**kwargs)
        self.maximum_duration = maximum_duration
        self.reboot_setting = reboot_setting
        self.windows_parameters = windows_parameters
        self.linux_parameters = linux_parameters
class VirtualMachineInstallPatchesResult(msrest.serialization.Model):
    """Result summary of an InstallPatches operation.

    Every attribute is read-only: the service populates them when a response
    is deserialized, so the constructor simply resets each one to ``None``.
    """

    _validation = {
        'status': {'readonly': True},
        'installation_activity_id': {'readonly': True},
        'reboot_status': {'readonly': True},
        'maintenance_window_exceeded': {'readonly': True},
        'excluded_patch_count': {'readonly': True},
        'not_selected_patch_count': {'readonly': True},
        'pending_patch_count': {'readonly': True},
        'installed_patch_count': {'readonly': True},
        'failed_patch_count': {'readonly': True},
        'patches': {'readonly': True},
        'start_date_time': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'installation_activity_id': {'key': 'installationActivityId', 'type': 'str'},
        'reboot_status': {'key': 'rebootStatus', 'type': 'str'},
        'maintenance_window_exceeded': {'key': 'maintenanceWindowExceeded', 'type': 'bool'},
        'excluded_patch_count': {'key': 'excludedPatchCount', 'type': 'int'},
        'not_selected_patch_count': {'key': 'notSelectedPatchCount', 'type': 'int'},
        'pending_patch_count': {'key': 'pendingPatchCount', 'type': 'int'},
        'installed_patch_count': {'key': 'installedPatchCount', 'type': 'int'},
        'failed_patch_count': {'key': 'failedPatchCount', 'type': 'int'},
        'patches': {'key': 'patches', 'type': '[PatchInstallationDetail]'},
        'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All properties are server-populated; start each one out empty.
        for _attr in (
            'status', 'installation_activity_id', 'reboot_status',
            'maintenance_window_exceeded', 'excluded_patch_count',
            'not_selected_patch_count', 'pending_patch_count',
            'installed_patch_count', 'failed_patch_count',
            'patches', 'start_date_time', 'error',
        ):
            setattr(self, _attr, None)
class VirtualMachineInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine.

    ``vm_health`` and ``assigned_host`` are read-only (server-populated) and
    are always initialised to ``None`` here.

    :param platform_update_domain: The update domain, as an int.
    :param platform_fault_domain: The fault domain, as an int.
    :param computer_name: The computer name of the VM.
    :param os_name: The operating system name.
    :param os_version: The operating system version.
    :param hyper_v_generation: String or ``HyperVGenerationType`` value.
    :param rdp_thumb_print: The RDP certificate thumbprint.
    :param vm_agent: ``VirtualMachineAgentInstanceView``.
    :param maintenance_redeploy_status: ``MaintenanceRedeployStatus``.
    :param disks: List of ``DiskInstanceView``.
    :param extensions: List of ``VirtualMachineExtensionInstanceView``.
    :param boot_diagnostics: ``BootDiagnosticsInstanceView``.
    :param statuses: List of ``InstanceViewStatus``.
    :param patch_status: ``VirtualMachinePatchStatus``.
    """
    _validation = {
        'vm_health': {'readonly': True},
        'assigned_host': {'readonly': True},
    }
    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'computer_name': {'key': 'computerName', 'type': 'str'},
        'os_name': {'key': 'osName', 'type': 'str'},
        'os_version': {'key': 'osVersion', 'type': 'str'},
        'hyper_v_generation': {'key': 'hyperVGeneration', 'type': 'str'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'assigned_host': {'key': 'assignedHost', 'type': 'str'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'patch_status': {'key': 'patchStatus', 'type': 'VirtualMachinePatchStatus'},
    }
    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        computer_name: Optional[str] = None,
        os_name: Optional[str] = None,
        os_version: Optional[str] = None,
        hyper_v_generation: Optional[Union[str, "HyperVGenerationType"]] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        patch_status: Optional["VirtualMachinePatchStatus"] = None,
        **kwargs
    ):
        super(VirtualMachineInstanceView, self).__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.computer_name = computer_name
        self.os_name = os_name
        self.os_version = os_version
        self.hyper_v_generation = hyper_v_generation
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        # Read-only, server-populated.
        self.vm_health = None
        self.boot_diagnostics = boot_diagnostics
        # Read-only, server-populated.
        self.assigned_host = None
        self.statuses = statuses
        self.patch_status = patch_status
class VirtualMachineIpTag(msrest.serialization.Model):
    """An IP tag associated with a virtual machine's public IP address."""

    _attribute_map = {
        'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
class VirtualMachineListResult(msrest.serialization.Model):
    """One page of a List Virtual Machines response.

    :param value: Required. The ``VirtualMachine`` items on this page.
    :param next_link: URI of the next page of results, if any.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachine]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachine"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineNetworkInterfaceConfiguration(msrest.serialization.Model):
    """Describes a virtual machine network interface configuration.

    :param name: Required. The network interface configuration name.
    :param primary: Whether this is the primary network interface.
    :param delete_option: String or ``DeleteOptions`` value.
    :param enable_accelerated_networking: Whether accelerated networking is
        enabled.
    :param enable_fpga: Whether the interface is FPGA-enabled.
    :param enable_ip_forwarding: Whether IP forwarding is enabled.
    :param network_security_group: ``SubResource`` reference.
    :param dns_settings:
        ``VirtualMachineNetworkInterfaceDnsSettingsConfiguration``.
    :param ip_configurations: List of
        ``VirtualMachineNetworkInterfaceIPConfiguration``.
    :param dscp_configuration: ``SubResource`` reference.
    """
    _validation = {
        'name': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_fpga': {'key': 'properties.enableFpga', 'type': 'bool'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineNetworkInterfaceDnsSettingsConfiguration'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineNetworkInterfaceIPConfiguration]'},
        'dscp_configuration': {'key': 'properties.dscpConfiguration', 'type': 'SubResource'},
    }
    def __init__(
        self,
        *,
        name: str,
        primary: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        enable_ip_forwarding: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineNetworkInterfaceDnsSettingsConfiguration"] = None,
        ip_configurations: Optional[List["VirtualMachineNetworkInterfaceIPConfiguration"]] = None,
        dscp_configuration: Optional["SubResource"] = None,
        **kwargs
    ):
        super(VirtualMachineNetworkInterfaceConfiguration, self).__init__(**kwargs)
        self.name = name
        self.primary = primary
        self.delete_option = delete_option
        self.enable_accelerated_networking = enable_accelerated_networking
        self.enable_fpga = enable_fpga
        self.enable_ip_forwarding = enable_ip_forwarding
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.dscp_configuration = dscp_configuration
class VirtualMachineNetworkInterfaceDnsSettingsConfiguration(msrest.serialization.Model):
    """DNS settings for a virtual machine network interface.

    :param dns_servers: List of DNS server addresses, as strings.
    """

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(self, *, dns_servers: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineNetworkInterfaceIPConfiguration(msrest.serialization.Model):
    """An IP configuration of a virtual machine network interface.

    :param name: Required. The IP configuration name.
    :param subnet: ``SubResource`` reference to the subnet.
    :param primary: Whether this is the primary IP configuration.
    :param public_ip_address_configuration:
        ``VirtualMachinePublicIPAddressConfiguration``.
    :param private_ip_address_version: String or ``IPVersions`` value.
    :param application_security_groups: List of ``SubResource`` references.
    :param application_gateway_backend_address_pools: List of ``SubResource``
        references.
    :param load_balancer_backend_address_pools: List of ``SubResource``
        references.
    """
    _validation = {
        'name': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachinePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
    }
    def __init__(
        self,
        *,
        name: str,
        subnet: Optional["SubResource"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachinePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersions"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        super(VirtualMachineNetworkInterfaceIPConfiguration, self).__init__(**kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_security_groups = application_security_groups
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
class VirtualMachinePatchStatus(msrest.serialization.Model):
    """The status of virtual machine patch operations.

    ``configuration_statuses`` is read-only (server-populated) and is always
    initialised to ``None`` here.

    :param available_patch_summary: ``AvailablePatchSummary``.
    :param last_patch_installation_summary: ``LastPatchInstallationSummary``.
    """
    _validation = {
        'configuration_statuses': {'readonly': True},
    }
    _attribute_map = {
        'available_patch_summary': {'key': 'availablePatchSummary', 'type': 'AvailablePatchSummary'},
        'last_patch_installation_summary': {'key': 'lastPatchInstallationSummary', 'type': 'LastPatchInstallationSummary'},
        'configuration_statuses': {'key': 'configurationStatuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        available_patch_summary: Optional["AvailablePatchSummary"] = None,
        last_patch_installation_summary: Optional["LastPatchInstallationSummary"] = None,
        **kwargs
    ):
        super(VirtualMachinePatchStatus, self).__init__(**kwargs)
        self.available_patch_summary = available_patch_summary
        self.last_patch_installation_summary = last_patch_installation_summary
        # Read-only, server-populated.
        self.configuration_statuses = None
class VirtualMachinePublicIPAddressConfiguration(msrest.serialization.Model):
    """A virtual machine's public IP address configuration.

    :param name: Required. The public IP address configuration name.
    :param sku: ``PublicIPAddressSku``.
    :param idle_timeout_in_minutes: Idle timeout, as an int.
    :param delete_option: String or ``DeleteOptions`` value.
    :param dns_settings:
        ``VirtualMachinePublicIPAddressDnsSettingsConfiguration``.
    :param ip_tags: List of ``VirtualMachineIpTag``.
    :param public_ip_prefix: ``SubResource`` reference.
    :param public_ip_address_version: String or ``IPVersions`` value.
    :param public_ip_allocation_method: String or
        ``PublicIPAllocationMethod`` value.
    """
    _validation = {
        'name': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachinePublicIPAddressDnsSettingsConfiguration'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'public_ip_allocation_method': {'key': 'properties.publicIPAllocationMethod', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: str,
        sku: Optional["PublicIPAddressSku"] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        dns_settings: Optional["VirtualMachinePublicIPAddressDnsSettingsConfiguration"] = None,
        ip_tags: Optional[List["VirtualMachineIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        public_ip_address_version: Optional[Union[str, "IPVersions"]] = None,
        public_ip_allocation_method: Optional[Union[str, "PublicIPAllocationMethod"]] = None,
        **kwargs
    ):
        super(VirtualMachinePublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.sku = sku
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.delete_option = delete_option
        self.dns_settings = dns_settings
        self.ip_tags = ip_tags
        self.public_ip_prefix = public_ip_prefix
        self.public_ip_address_version = public_ip_address_version
        self.public_ip_allocation_method = public_ip_allocation_method
class VirtualMachinePublicIPAddressDnsSettingsConfiguration(msrest.serialization.Model):
    """DNS settings for a virtual machine's public IP address.

    :param domain_name_label: Required. The domain name label.
    """

    _validation = {
        'domain_name_label': {'required': True},
    }

    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }

    def __init__(self, *, domain_name_label: str, **kwargs):
        super().__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineReimageParameters(msrest.serialization.Model):
    """Parameters for reimaging a virtual machine.

    :param temp_disk: Whether to reimage the temp disk, as a bool.
    """

    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(self, *, temp_disk: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        self.temp_disk = temp_disk
class VirtualMachineRunCommand(Resource):
    """Describes a virtual machine run command.

    ``provisioning_state`` and ``instance_view`` are read-only
    (server-populated) and are always initialised to ``None`` here.

    :param location: Required. Resource location, forwarded to ``Resource``.
    :param tags: Resource tags as a string-to-string mapping.
    :param source: ``VirtualMachineRunCommandScriptSource``.
    :param parameters: List of ``RunCommandInputParameter``.
    :param protected_parameters: List of ``RunCommandInputParameter``.
    :param async_execution: Whether to execute asynchronously; defaults to
        ``False``.
    :param run_as_user: User to run the command as.
    :param run_as_password: Password for ``run_as_user``.
    :param timeout_in_seconds: Timeout, as an int.
    :param output_blob_uri: Blob URI for standard output.
    :param error_blob_uri: Blob URI for standard error.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'VirtualMachineRunCommandScriptSource'},
        'parameters': {'key': 'properties.parameters', 'type': '[RunCommandInputParameter]'},
        'protected_parameters': {'key': 'properties.protectedParameters', 'type': '[RunCommandInputParameter]'},
        'async_execution': {'key': 'properties.asyncExecution', 'type': 'bool'},
        'run_as_user': {'key': 'properties.runAsUser', 'type': 'str'},
        'run_as_password': {'key': 'properties.runAsPassword', 'type': 'str'},
        'timeout_in_seconds': {'key': 'properties.timeoutInSeconds', 'type': 'int'},
        'output_blob_uri': {'key': 'properties.outputBlobUri', 'type': 'str'},
        'error_blob_uri': {'key': 'properties.errorBlobUri', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineRunCommandInstanceView'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["VirtualMachineRunCommandScriptSource"] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        protected_parameters: Optional[List["RunCommandInputParameter"]] = None,
        async_execution: Optional[bool] = False,
        run_as_user: Optional[str] = None,
        run_as_password: Optional[str] = None,
        timeout_in_seconds: Optional[int] = None,
        output_blob_uri: Optional[str] = None,
        error_blob_uri: Optional[str] = None,
        **kwargs
    ):
        super(VirtualMachineRunCommand, self).__init__(location=location, tags=tags, **kwargs)
        self.source = source
        self.parameters = parameters
        self.protected_parameters = protected_parameters
        self.async_execution = async_execution
        self.run_as_user = run_as_user
        self.run_as_password = run_as_password
        self.timeout_in_seconds = timeout_in_seconds
        self.output_blob_uri = output_blob_uri
        self.error_blob_uri = error_blob_uri
        # Read-only, server-populated.
        self.provisioning_state = None
        self.instance_view = None
class VirtualMachineRunCommandInstanceView(msrest.serialization.Model):
    """The instance view of a virtual machine run command.

    :param execution_state: String or ``ExecutionState`` value.
    :param execution_message: Execution message, as a string.
    :param exit_code: Exit code of the command, as an int.
    :param output: Standard output, as a string.
    :param error: Standard error, as a string.
    :param start_time: Start time, as a ``datetime``.
    :param end_time: End time, as a ``datetime``.
    :param statuses: List of ``InstanceViewStatus``.
    """
    _attribute_map = {
        'execution_state': {'key': 'executionState', 'type': 'str'},
        'execution_message': {'key': 'executionMessage', 'type': 'str'},
        'exit_code': {'key': 'exitCode', 'type': 'int'},
        'output': {'key': 'output', 'type': 'str'},
        'error': {'key': 'error', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(
        self,
        *,
        execution_state: Optional[Union[str, "ExecutionState"]] = None,
        execution_message: Optional[str] = None,
        exit_code: Optional[int] = None,
        output: Optional[str] = None,
        error: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ):
        super(VirtualMachineRunCommandInstanceView, self).__init__(**kwargs)
        self.execution_state = execution_state
        self.execution_message = execution_message
        self.exit_code = exit_code
        self.output = output
        self.error = error
        self.start_time = start_time
        self.end_time = end_time
        self.statuses = statuses
class VirtualMachineRunCommandScriptSource(msrest.serialization.Model):
    """The source of a run-command script: inline text, a URI, or a command id.

    :param script: Inline script content.
    :param script_uri: URI pointing at the script.
    :param command_id: Id of a predefined command.
    """

    _attribute_map = {
        'script': {'key': 'script', 'type': 'str'},
        'script_uri': {'key': 'scriptUri', 'type': 'str'},
        'command_id': {'key': 'commandId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        script: Optional[str] = None,
        script_uri: Optional[str] = None,
        command_id: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.script, self.script_uri, self.command_id = script, script_uri, command_id
class VirtualMachineRunCommandsListResult(msrest.serialization.Model):
    """One page of a List Run Commands response.

    :param value: Required. The ``VirtualMachineRunCommand`` items on this
        page.
    :param next_link: URI of the next page of results, if any.
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineRunCommand]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["VirtualMachineRunCommand"], next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineRunCommandUpdate(UpdateResource):
    """Describes an update to a virtual machine run command.

    ``provisioning_state`` and ``instance_view`` are read-only
    (server-populated) and are always initialised to ``None`` here.

    :param tags: Resource tags, forwarded to ``UpdateResource``.
    :param source: ``VirtualMachineRunCommandScriptSource``.
    :param parameters: List of ``RunCommandInputParameter``.
    :param protected_parameters: List of ``RunCommandInputParameter``.
    :param async_execution: Whether to execute asynchronously; defaults to
        ``False``.
    :param run_as_user: User to run the command as.
    :param run_as_password: Password for ``run_as_user``.
    :param timeout_in_seconds: Timeout, as an int.
    :param output_blob_uri: Blob URI for standard output.
    :param error_blob_uri: Blob URI for standard error.
    """
    _validation = {
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
    }
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'VirtualMachineRunCommandScriptSource'},
        'parameters': {'key': 'properties.parameters', 'type': '[RunCommandInputParameter]'},
        'protected_parameters': {'key': 'properties.protectedParameters', 'type': '[RunCommandInputParameter]'},
        'async_execution': {'key': 'properties.asyncExecution', 'type': 'bool'},
        'run_as_user': {'key': 'properties.runAsUser', 'type': 'str'},
        'run_as_password': {'key': 'properties.runAsPassword', 'type': 'str'},
        'timeout_in_seconds': {'key': 'properties.timeoutInSeconds', 'type': 'int'},
        'output_blob_uri': {'key': 'properties.outputBlobUri', 'type': 'str'},
        'error_blob_uri': {'key': 'properties.errorBlobUri', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineRunCommandInstanceView'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        source: Optional["VirtualMachineRunCommandScriptSource"] = None,
        parameters: Optional[List["RunCommandInputParameter"]] = None,
        protected_parameters: Optional[List["RunCommandInputParameter"]] = None,
        async_execution: Optional[bool] = False,
        run_as_user: Optional[str] = None,
        run_as_password: Optional[str] = None,
        timeout_in_seconds: Optional[int] = None,
        output_blob_uri: Optional[str] = None,
        error_blob_uri: Optional[str] = None,
        **kwargs
    ):
        super(VirtualMachineRunCommandUpdate, self).__init__(tags=tags, **kwargs)
        self.source = source
        self.parameters = parameters
        self.protected_parameters = protected_parameters
        self.async_execution = async_execution
        self.run_as_user = run_as_user
        self.run_as_password = run_as_password
        self.timeout_in_seconds = timeout_in_seconds
        self.output_blob_uri = output_blob_uri
        self.error_blob_uri = error_blob_uri
        # Read-only, server-populated.
        self.provisioning_state = None
        self.instance_view = None
class VirtualMachineScaleSet(Resource):
    """Describes a virtual machine scale set.

    ``provisioning_state``, ``unique_id`` and ``time_created`` are read-only
    (server-populated) and are always initialised to ``None`` here.

    :param location: Required. Resource location, forwarded to ``Resource``.
    :param tags: Resource tags as a string-to-string mapping.
    :param sku: ``Sku`` of the scale set.
    :param plan: ``Plan`` (marketplace) information.
    :param identity: ``VirtualMachineScaleSetIdentity``.
    :param zones: List of availability zones, as strings.
    :param extended_location: ``ExtendedLocation``.
    :param upgrade_policy: ``UpgradePolicy``.
    :param automatic_repairs_policy: ``AutomaticRepairsPolicy``.
    :param virtual_machine_profile: ``VirtualMachineScaleSetVMProfile``.
    :param overprovision: Whether to overprovision, as a bool.
    :param do_not_run_extensions_on_overprovisioned_v_ms: Bool flag.
    :param single_placement_group: Bool flag.
    :param zone_balance: Bool flag.
    :param platform_fault_domain_count: Fault domain count, as an int.
    :param proximity_placement_group: ``SubResource`` reference.
    :param host_group: ``SubResource`` reference.
    :param additional_capabilities: ``AdditionalCapabilities``.
    :param scale_in_policy: ``ScaleInPolicy``.
    :param orchestration_mode: String or ``OrchestrationMode`` value.
    :param spot_restore_policy: ``SpotRestorePolicy``.
    """
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
        'time_created': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
        'orchestration_mode': {'key': 'properties.orchestrationMode', 'type': 'str'},
        'spot_restore_policy': {'key': 'properties.spotRestorePolicy', 'type': 'SpotRestorePolicy'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        zones: Optional[List[str]] = None,
        extended_location: Optional["ExtendedLocation"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        zone_balance: Optional[bool] = None,
        platform_fault_domain_count: Optional[int] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        host_group: Optional["SubResource"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        orchestration_mode: Optional[Union[str, "OrchestrationMode"]] = None,
        spot_restore_policy: Optional["SpotRestorePolicy"] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.extended_location = extended_location
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        # Read-only, server-populated.
        self.provisioning_state = None
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        # Read-only, server-populated.
        self.unique_id = None
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.proximity_placement_group = proximity_placement_group
        self.host_group = host_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.orchestration_mode = orchestration_mode
        self.spot_restore_policy = spot_restore_policy
        # Read-only, server-populated.
        self.time_created = None
class VirtualMachineScaleSetDataDisk(msrest.serialization.Model):
    """Describes a virtual machine scale set data disk.

    :param lun: Required. The logical unit number of the disk, as an int.
    :param create_option: Required. String or ``DiskCreateOptionTypes``
        value.
    :param name: The disk name.
    :param caching: String or ``CachingTypes`` value.
    :param write_accelerator_enabled: Bool flag.
    :param disk_size_gb: Disk size in GB, as an int.
    :param managed_disk: ``VirtualMachineScaleSetManagedDiskParameters``.
    :param disk_iops_read_write: IOPS value, as an int.
    :param disk_m_bps_read_write: MBps value, as an int.
    :param delete_option: String or ``DiskDeleteOptionTypes`` value.
    """
    _validation = {
        'lun': {'required': True},
        'create_option': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
        'disk_iops_read_write': {'key': 'diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'diskMBpsReadWrite', 'type': 'long'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        lun: int,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        disk_iops_read_write: Optional[int] = None,
        disk_m_bps_read_write: Optional[int] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetDataDisk, self).__init__(**kwargs)
        self.name = name
        self.lun = lun
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
        self.managed_disk = managed_disk
        self.disk_iops_read_write = disk_iops_read_write
        self.disk_m_bps_read_write = disk_m_bps_read_write
        self.delete_option = delete_option
class VirtualMachineScaleSetExtension(SubResourceReadOnly):
    """Describes a virtual machine scale set extension.

    ``type`` (the resource type) and ``provisioning_state`` are read-only
    (server-populated) and are always initialised to ``None`` here; the
    extension's handler type is carried by ``type_properties_type``.

    :param name: The extension name.
    :param force_update_tag: Tag forcing the extension handler to re-run.
    :param publisher: The extension handler publisher.
    :param type_properties_type: The extension type (``properties.type``).
    :param type_handler_version: The handler version.
    :param auto_upgrade_minor_version: Bool flag.
    :param enable_automatic_upgrade: Bool flag.
    :param settings: Public settings, as an arbitrary object.
    :param protected_settings: Protected settings, as an arbitrary object.
    :param provision_after_extensions: List of extension names, as strings.
    :param suppress_failures: Bool flag.
    :param protected_settings_from_key_vault: Arbitrary object.
    """
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        provision_after_extensions: Optional[List[str]] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        super(VirtualMachineScaleSetExtension, self).__init__(**kwargs)
        self.name = name
        # Read-only, server-populated.
        self.type = None
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        # Read-only, server-populated.
        self.provisioning_state = None
        self.provision_after_extensions = provision_after_extensions
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetExtensionListResult(msrest.serialization.Model):
    """Paged result for listing VM scale set extensions.

    ``value`` is required (see ``_validation``); ``next_link`` optionally
    points to the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetExtension]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["VirtualMachineScaleSetExtension"],
        next_link: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the list result; ``value`` is required."""
        super(VirtualMachineScaleSetExtensionListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetExtensionProfile(msrest.serialization.Model):
    """Extension profile for a VM scale set: the extension list plus an
    optional extensions time budget string."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetExtension]'},
        'extensions_time_budget': {'key': 'extensionsTimeBudget', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        extensions: Optional[List["VirtualMachineScaleSetExtension"]] = None,
        extensions_time_budget: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the profile; all arguments are keyword-only and optional."""
        super(VirtualMachineScaleSetExtensionProfile, self).__init__(**kwargs)
        self.extensions = extensions
        self.extensions_time_budget = extensions_time_budget
class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly):
    """Update payload for a VM scale set extension.

    ``id`` (inherited), ``name``, ``type`` and ``provisioning_state`` are
    declared read-only in ``_validation`` — they are populated by the
    service, never sent by the client.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        provision_after_extensions: Optional[List[str]] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ) -> None:
        """Initialize the update model; read-only fields are set to ``None``."""
        super(VirtualMachineScaleSetExtensionUpdate, self).__init__(**kwargs)
        self.name = None  # read-only (see _validation); populated by the service
        self.type = None  # read-only (see _validation); populated by the service
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None  # read-only (see _validation); populated by the service
        self.provision_after_extensions = provision_after_extensions
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetHardwareProfile(msrest.serialization.Model):
    """Hardware profile for a VM scale set; wraps optional VM size properties."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'vm_size_properties': {'key': 'vmSizeProperties', 'type': 'VMSizeProperties'},
    }

    def __init__(
        self,
        *,
        vm_size_properties: Optional["VMSizeProperties"] = None,
        **kwargs
    ) -> None:
        """Initialize the hardware profile."""
        super(VirtualMachineScaleSetHardwareProfile, self).__init__(**kwargs)
        self.vm_size_properties = vm_size_properties
class VirtualMachineScaleSetIdentity(msrest.serialization.Model):
    """Identity for a VM scale set.

    ``principal_id`` and ``tenant_id`` are declared read-only in
    ``_validation`` — populated by the service, never sent by the client.
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentitiesValue}'},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "ResourceIdentityType"]] = None,
        user_assigned_identities: Optional[Dict[str, "UserAssignedIdentitiesValue"]] = None,
        **kwargs
    ) -> None:
        """Initialize the identity; read-only fields are set to ``None``."""
        super(VirtualMachineScaleSetIdentity, self).__init__(**kwargs)
        self.principal_id = None  # read-only (see _validation); populated by the service
        self.tenant_id = None  # read-only (see _validation); populated by the service
        self.type = type
        self.user_assigned_identities = user_assigned_identities
class VirtualMachineScaleSetInstanceView(msrest.serialization.Model):
    """Instance view of a VM scale set.

    All fields except ``statuses`` are declared read-only in ``_validation``
    — populated by the service, never sent by the client.
    """

    _validation = {
        'virtual_machine': {'readonly': True},
        'extensions': {'readonly': True},
        'orchestration_services': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'orchestration_services': {'key': 'orchestrationServices', 'type': '[OrchestrationServiceSummary]'},
    }

    def __init__(
        self,
        *,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        **kwargs
    ) -> None:
        """Initialize the instance view; read-only fields are set to ``None``."""
        super(VirtualMachineScaleSetInstanceView, self).__init__(**kwargs)
        self.virtual_machine = None  # read-only (see _validation)
        self.extensions = None  # read-only (see _validation)
        self.statuses = statuses
        self.orchestration_services = None  # read-only (see _validation)
class VirtualMachineScaleSetInstanceViewStatusesSummary(msrest.serialization.Model):
    """Status summary for VMs of a scale set.

    Entirely read-only (see ``_validation``): populated by the service only.
    """

    _validation = {
        'statuses_summary': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }

    def __init__(
        self,
        **kwargs
    ) -> None:
        """Initialize the summary; the only field is read-only and set to ``None``."""
        super(VirtualMachineScaleSetInstanceViewStatusesSummary, self).__init__(**kwargs)
        self.statuses_summary = None  # read-only (see _validation)
class VirtualMachineScaleSetIPConfiguration(SubResource):
    """IP configuration of a VM scale set network interface.

    ``name`` is required (see ``_validation``); ``id`` is inherited from
    ``SubResource`` and forwarded to the base initializer.
    """

    _validation = {
        'name': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetPublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetPublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ) -> None:
        """Initialize the IP configuration; ``name`` is required."""
        super(VirtualMachineScaleSetIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetIpTag(msrest.serialization.Model):
    """An IP tag associated with a public IP address: a tag type and a value."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        ip_tag_type: Optional[str] = None,
        tag: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the IP tag; both fields are optional."""
        super(VirtualMachineScaleSetIpTag, self).__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
class VirtualMachineScaleSetListOSUpgradeHistory(msrest.serialization.Model):
    """Paged result for listing OS upgrade history entries.

    ``value`` is required (see ``_validation``); ``next_link`` optionally
    points to the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'value': {'key': 'value', 'type': '[UpgradeOperationHistoricalStatusInfo]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["UpgradeOperationHistoricalStatusInfo"],
        next_link: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the list result; ``value`` is required."""
        super(VirtualMachineScaleSetListOSUpgradeHistory, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListResult(msrest.serialization.Model):
    """Paged result for listing VM scale sets.

    ``value`` is required (see ``_validation``); ``next_link`` optionally
    points to the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["VirtualMachineScaleSet"],
        next_link: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the list result; ``value`` is required."""
        super(VirtualMachineScaleSetListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListSkusResult(msrest.serialization.Model):
    """Paged result for listing VM scale set SKUs.

    ``value`` is required (see ``_validation``); ``next_link`` optionally
    points to the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetSku]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["VirtualMachineScaleSetSku"],
        next_link: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the list result; ``value`` is required."""
        super(VirtualMachineScaleSetListSkusResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetListWithLinkResult(msrest.serialization.Model):
    """Paged result for listing VM scale sets (variant with next link).

    ``value`` is required (see ``_validation``); ``next_link`` optionally
    points to the next page of results.
    """

    _validation = {
        'value': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["VirtualMachineScaleSet"],
        next_link: Optional[str] = None,
        **kwargs
    ) -> None:
        """Initialize the list result; ``value`` is required."""
        super(VirtualMachineScaleSetListWithLinkResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class VirtualMachineScaleSetManagedDiskParameters(msrest.serialization.Model):
    """Managed-disk parameters for a scale set disk: storage account type,
    disk encryption set, and security profile. All fields are optional."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
        'security_profile': {'key': 'securityProfile', 'type': 'VMDiskSecurityProfile'},
    }

    def __init__(
        self,
        *,
        storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        security_profile: Optional["VMDiskSecurityProfile"] = None,
        **kwargs
    ) -> None:
        """Initialize the managed-disk parameters."""
        super(VirtualMachineScaleSetManagedDiskParameters, self).__init__(**kwargs)
        self.storage_account_type = storage_account_type
        self.disk_encryption_set = disk_encryption_set
        self.security_profile = security_profile
class VirtualMachineScaleSetNetworkConfiguration(SubResource):
    """Network configuration of a VM scale set network interface.

    ``name`` is required (see ``_validation``); ``id`` is inherited from
    ``SubResource`` and forwarded to the base initializer.
    """

    _validation = {
        'name': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_fpga': {'key': 'properties.enableFpga', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        id: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ) -> None:
        """Initialize the network configuration; ``name`` is required."""
        super(VirtualMachineScaleSetNetworkConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.primary = primary
        self.enable_accelerated_networking = enable_accelerated_networking
        self.enable_fpga = enable_fpga
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.enable_ip_forwarding = enable_ip_forwarding
        self.delete_option = delete_option
class VirtualMachineScaleSetNetworkConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings for a scale set network configuration: a DNS server list."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        dns_servers: Optional[List[str]] = None,
        **kwargs
    ) -> None:
        """Initialize the DNS settings."""
        super(VirtualMachineScaleSetNetworkConfigurationDnsSettings, self).__init__(**kwargs)
        self.dns_servers = dns_servers
class VirtualMachineScaleSetNetworkProfile(msrest.serialization.Model):
    """Network profile for a VM scale set: health probe reference, network
    interface configurations, and network API version."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
        'network_api_version': {'key': 'networkApiVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        **kwargs
    ) -> None:
        """Initialize the network profile; all arguments are optional."""
        super(VirtualMachineScaleSetNetworkProfile, self).__init__(**kwargs)
        self.health_probe = health_probe
        self.network_interface_configurations = network_interface_configurations
        self.network_api_version = network_api_version
class VirtualMachineScaleSetOSDisk(msrest.serialization.Model):
    """OS disk description for a VM scale set.

    ``create_option`` is required (see ``_validation``); everything else is
    optional.
    """

    _validation = {
        'create_option': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'create_option': {'key': 'createOption', 'type': 'str'},
        'diff_disk_settings': {'key': 'diffDiskSettings', 'type': 'DiffDiskSettings'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'os_type': {'key': 'osType', 'type': 'str'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        create_option: Union[str, "DiskCreateOptionTypes"],
        name: Optional[str] = None,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        diff_disk_settings: Optional["DiffDiskSettings"] = None,
        disk_size_gb: Optional[int] = None,
        os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ) -> None:
        """Initialize the OS disk; ``create_option`` is required."""
        super(VirtualMachineScaleSetOSDisk, self).__init__(**kwargs)
        self.name = name
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.create_option = create_option
        self.diff_disk_settings = diff_disk_settings
        self.disk_size_gb = disk_size_gb
        self.os_type = os_type
        self.image = image
        self.vhd_containers = vhd_containers
        self.managed_disk = managed_disk
        self.delete_option = delete_option
class VirtualMachineScaleSetOSProfile(msrest.serialization.Model):
    """OS profile for scale set VMs: computer name prefix, admin credentials,
    custom data, OS-specific configuration, secrets, and the extension
    operations switch. All fields are optional."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'computer_name_prefix': {'key': 'computerNamePrefix', 'type': 'str'},
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'admin_password': {'key': 'adminPassword', 'type': 'str'},
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
        'allow_extension_operations': {'key': 'allowExtensionOperations', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        computer_name_prefix: Optional[str] = None,
        admin_username: Optional[str] = None,
        admin_password: Optional[str] = None,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        allow_extension_operations: Optional[bool] = None,
        **kwargs
    ) -> None:
        """Initialize the OS profile; all arguments are optional."""
        super(VirtualMachineScaleSetOSProfile, self).__init__(**kwargs)
        self.computer_name_prefix = computer_name_prefix
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
        self.allow_extension_operations = allow_extension_operations
class VirtualMachineScaleSetPublicIPAddressConfiguration(msrest.serialization.Model):
    """Public IP address configuration for a scale set IP configuration.

    ``name`` is required (see ``_validation``).
    """

    _validation = {
        'name': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: str,
        sku: Optional["PublicIPAddressSku"] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        ip_tags: Optional[List["VirtualMachineScaleSetIpTag"]] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        public_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ) -> None:
        """Initialize the public IP configuration; ``name`` is required."""
        super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.sku = sku
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
        self.ip_tags = ip_tags
        self.public_ip_prefix = public_ip_prefix
        self.public_ip_address_version = public_ip_address_version
        self.delete_option = delete_option
class VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings(msrest.serialization.Model):
    """DNS settings for a scale set public IP configuration.

    ``domain_name_label`` is required (see ``_validation``).
    """

    _validation = {
        'domain_name_label': {'required': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        domain_name_label: str,
        **kwargs
    ) -> None:
        """Initialize the DNS settings; ``domain_name_label`` is required."""
        super(VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings, self).__init__(**kwargs)
        self.domain_name_label = domain_name_label
class VirtualMachineScaleSetVMReimageParameters(VirtualMachineReimageParameters):
    """Reimage parameters for a single scale set VM; only forwards
    ``temp_disk`` to the base ``VirtualMachineReimageParameters``."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        temp_disk: Optional[bool] = None,
        **kwargs
    ) -> None:
        """Initialize by delegating ``temp_disk`` to the base class."""
        super(VirtualMachineScaleSetVMReimageParameters, self).__init__(temp_disk=temp_disk, **kwargs)
class VirtualMachineScaleSetReimageParameters(VirtualMachineScaleSetVMReimageParameters):
    """Reimage parameters for a scale set: adds an optional list of instance
    IDs on top of the per-VM reimage parameters."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'temp_disk': {'key': 'tempDisk', 'type': 'bool'},
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        temp_disk: Optional[bool] = None,
        instance_ids: Optional[List[str]] = None,
        **kwargs
    ) -> None:
        """Initialize; ``temp_disk`` is delegated to the base class."""
        super(VirtualMachineScaleSetReimageParameters, self).__init__(temp_disk=temp_disk, **kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetSku(msrest.serialization.Model):
    """An available SKU for a VM scale set.

    Entirely read-only (see ``_validation``): populated by the service only.
    """

    _validation = {
        'resource_type': {'readonly': True},
        'sku': {'readonly': True},
        'capacity': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'capacity': {'key': 'capacity', 'type': 'VirtualMachineScaleSetSkuCapacity'},
    }

    def __init__(
        self,
        **kwargs
    ) -> None:
        """Initialize the SKU; all fields are read-only and set to ``None``."""
        super(VirtualMachineScaleSetSku, self).__init__(**kwargs)
        self.resource_type = None  # read-only (see _validation)
        self.sku = None  # read-only (see _validation)
        self.capacity = None  # read-only (see _validation)
class VirtualMachineScaleSetSkuCapacity(msrest.serialization.Model):
    """Scaling information for a scale set SKU: minimum/maximum/default
    capacity and scale type.

    Entirely read-only (see ``_validation``): populated by the service only.
    """

    _validation = {
        'minimum': {'readonly': True},
        'maximum': {'readonly': True},
        'default_capacity': {'readonly': True},
        'scale_type': {'readonly': True},
    }

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'long'},
        'maximum': {'key': 'maximum', 'type': 'long'},
        'default_capacity': {'key': 'defaultCapacity', 'type': 'long'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ) -> None:
        """Initialize the capacity info; all fields are read-only and ``None``."""
        super(VirtualMachineScaleSetSkuCapacity, self).__init__(**kwargs)
        self.minimum = None  # read-only (see _validation)
        self.maximum = None  # read-only (see _validation)
        self.default_capacity = None  # read-only (see _validation)
        self.scale_type = None  # read-only (see _validation)
class VirtualMachineScaleSetStorageProfile(msrest.serialization.Model):
    """Storage profile for a VM scale set: image reference, OS disk, and
    data disks. All fields are optional."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ) -> None:
        """Initialize the storage profile; all arguments are optional."""
        super(VirtualMachineScaleSetStorageProfile, self).__init__(**kwargs)
        self.image_reference = image_reference
        self.os_disk = os_disk
        self.data_disks = data_disks
class VirtualMachineScaleSetUpdate(UpdateResource):
    """Update (PATCH) payload for a VM scale set; ``tags`` are forwarded to
    the ``UpdateResource`` base, everything else is set locally. All fields
    are optional."""

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'automatic_repairs_policy': {'key': 'properties.automaticRepairsPolicy', 'type': 'AutomaticRepairsPolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetUpdateVMProfile'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'do_not_run_extensions_on_overprovisioned_v_ms': {'key': 'properties.doNotRunExtensionsOnOverprovisionedVMs', 'type': 'bool'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'scale_in_policy': {'key': 'properties.scaleInPolicy', 'type': 'ScaleInPolicy'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        sku: Optional["Sku"] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineScaleSetIdentity"] = None,
        upgrade_policy: Optional["UpgradePolicy"] = None,
        automatic_repairs_policy: Optional["AutomaticRepairsPolicy"] = None,
        virtual_machine_profile: Optional["VirtualMachineScaleSetUpdateVMProfile"] = None,
        overprovision: Optional[bool] = None,
        do_not_run_extensions_on_overprovisioned_v_ms: Optional[bool] = None,
        single_placement_group: Optional[bool] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        scale_in_policy: Optional["ScaleInPolicy"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        **kwargs
    ) -> None:
        """Initialize the update payload; ``tags`` go to the base class."""
        super(VirtualMachineScaleSetUpdate, self).__init__(tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.identity = identity
        self.upgrade_policy = upgrade_policy
        self.automatic_repairs_policy = automatic_repairs_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.overprovision = overprovision
        self.do_not_run_extensions_on_overprovisioned_v_ms = do_not_run_extensions_on_overprovisioned_v_ms
        self.single_placement_group = single_placement_group
        self.additional_capabilities = additional_capabilities
        self.scale_in_policy = scale_in_policy
        self.proximity_placement_group = proximity_placement_group
class VirtualMachineScaleSetUpdateIPConfiguration(SubResource):
    """Update variant of the scale set IP configuration: identical shape to
    ``VirtualMachineScaleSetIPConfiguration`` but every field, including
    ``name``, is optional (no ``_validation`` on this class)."""

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address_configuration': {'key': 'properties.publicIPAddressConfiguration', 'type': 'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        subnet: Optional["ApiEntityReference"] = None,
        primary: Optional[bool] = None,
        public_ip_address_configuration: Optional["VirtualMachineScaleSetUpdatePublicIPAddressConfiguration"] = None,
        private_ip_address_version: Optional[Union[str, "IPVersion"]] = None,
        application_gateway_backend_address_pools: Optional[List["SubResource"]] = None,
        application_security_groups: Optional[List["SubResource"]] = None,
        load_balancer_backend_address_pools: Optional[List["SubResource"]] = None,
        load_balancer_inbound_nat_pools: Optional[List["SubResource"]] = None,
        **kwargs
    ) -> None:
        """Initialize the update IP configuration; all arguments are optional."""
        super(VirtualMachineScaleSetUpdateIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.subnet = subnet
        self.primary = primary
        self.public_ip_address_configuration = public_ip_address_configuration
        self.private_ip_address_version = private_ip_address_version
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.application_security_groups = application_security_groups
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
class VirtualMachineScaleSetUpdateNetworkConfiguration(SubResource):
    """Update variant of the scale set network configuration: identical shape
    to ``VirtualMachineScaleSetNetworkConfiguration`` but every field,
    including ``name``, is optional (no ``_validation`` on this class)."""

    # python attribute -> (REST wire key, msrest type string);
    # 'properties.*' keys are flattened into a nested properties object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_fpga': {'key': 'properties.enableFpga', 'type': 'bool'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetUpdateIPConfiguration]'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        primary: Optional[bool] = None,
        enable_accelerated_networking: Optional[bool] = None,
        enable_fpga: Optional[bool] = None,
        network_security_group: Optional["SubResource"] = None,
        dns_settings: Optional["VirtualMachineScaleSetNetworkConfigurationDnsSettings"] = None,
        ip_configurations: Optional[List["VirtualMachineScaleSetUpdateIPConfiguration"]] = None,
        enable_ip_forwarding: Optional[bool] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,
        **kwargs
    ) -> None:
        """Initialize the update network configuration; all arguments optional."""
        super(VirtualMachineScaleSetUpdateNetworkConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.primary = primary
        self.enable_accelerated_networking = enable_accelerated_networking
        self.enable_fpga = enable_fpga
        self.network_security_group = network_security_group
        self.dns_settings = dns_settings
        self.ip_configurations = ip_configurations
        self.enable_ip_forwarding = enable_ip_forwarding
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateNetworkProfile(msrest.serialization.Model):
    """Update variant of the scale set network profile; same shape as
    ``VirtualMachineScaleSetNetworkProfile`` but using the update network
    configuration type."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetUpdateNetworkConfiguration]'},
        'network_api_version': {'key': 'networkApiVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        health_probe: Optional["ApiEntityReference"] = None,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetUpdateNetworkConfiguration"]] = None,
        network_api_version: Optional[Union[str, "NetworkApiVersion"]] = None,
        **kwargs
    ) -> None:
        """Initialize the update network profile; all arguments are optional."""
        super(VirtualMachineScaleSetUpdateNetworkProfile, self).__init__(**kwargs)
        self.health_probe = health_probe
        self.network_interface_configurations = network_interface_configurations
        self.network_api_version = network_api_version
class VirtualMachineScaleSetUpdateOSDisk(msrest.serialization.Model):
    """Update variant of the scale set OS disk: like
    ``VirtualMachineScaleSetOSDisk`` but without the required
    ``create_option``; every field here is optional."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'caching': {'key': 'caching', 'type': 'str'},
        'write_accelerator_enabled': {'key': 'writeAcceleratorEnabled', 'type': 'bool'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
        'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
        'delete_option': {'key': 'deleteOption', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        caching: Optional[Union[str, "CachingTypes"]] = None,
        write_accelerator_enabled: Optional[bool] = None,
        disk_size_gb: Optional[int] = None,
        image: Optional["VirtualHardDisk"] = None,
        vhd_containers: Optional[List[str]] = None,
        managed_disk: Optional["VirtualMachineScaleSetManagedDiskParameters"] = None,
        delete_option: Optional[Union[str, "DiskDeleteOptionTypes"]] = None,
        **kwargs
    ) -> None:
        """Initialize the update OS disk; all arguments are optional."""
        super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs)
        self.caching = caching
        self.write_accelerator_enabled = write_accelerator_enabled
        self.disk_size_gb = disk_size_gb
        self.image = image
        self.vhd_containers = vhd_containers
        self.managed_disk = managed_disk
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateOSProfile(msrest.serialization.Model):
    """Update variant of the scale set OS profile: custom data, OS-specific
    configuration, and secrets. All fields are optional."""

    # python attribute -> (REST wire key, msrest type string)
    _attribute_map = {
        'custom_data': {'key': 'customData', 'type': 'str'},
        'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
        'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
        'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
    }

    def __init__(
        self,
        *,
        custom_data: Optional[str] = None,
        windows_configuration: Optional["WindowsConfiguration"] = None,
        linux_configuration: Optional["LinuxConfiguration"] = None,
        secrets: Optional[List["VaultSecretGroup"]] = None,
        **kwargs
    ) -> None:
        """Initialize the update OS profile; all arguments are optional."""
        super(VirtualMachineScaleSetUpdateOSProfile, self).__init__(**kwargs)
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
class VirtualMachineScaleSetUpdatePublicIPAddressConfiguration(msrest.serialization.Model):
    """Updatable public-IP address configuration for a scale-set network interface.

    All attributes are optional. Note that several attributes serialize
    under the nested ``properties`` JSON object (see ``_attribute_map``).
    """
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'delete_option': {'key': 'properties.deleteOption', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        idle_timeout_in_minutes: Optional[int] = None,
        dns_settings: Optional["VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings"] = None,
        public_ip_prefix: Optional["SubResource"] = None,
        delete_option: Optional[Union[str, "DeleteOptions"]] = None,  # accepts enum or its string value
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base ``Model``."""
        super(VirtualMachineScaleSetUpdatePublicIPAddressConfiguration, self).__init__(**kwargs)
        self.name = name
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.dns_settings = dns_settings
        self.public_ip_prefix = public_ip_prefix
        self.delete_option = delete_option
class VirtualMachineScaleSetUpdateStorageProfile(msrest.serialization.Model):
    """Storage-profile fields that may be changed on a scale-set update.

    Carries the image reference, OS disk and data disks; every field is
    optional and defaults to ``None``.
    """
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetUpdateOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }
    def __init__(
        self,
        *,
        image_reference: Optional["ImageReference"] = None,
        os_disk: Optional["VirtualMachineScaleSetUpdateOSDisk"] = None,
        data_disks: Optional[List["VirtualMachineScaleSetDataDisk"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Plain attribute copies; (de)serialization is driven by _attribute_map.
        self.data_disks = data_disks
        self.os_disk = os_disk
        self.image_reference = image_reference
class VirtualMachineScaleSetUpdateVMProfile(msrest.serialization.Model):
    """Per-VM profile sections that may be changed on a scale-set update.

    Aggregates the OS, storage, network, security, diagnostics and
    extension profiles plus licensing, billing, scheduled-events and
    user-data settings. All attributes are optional.
    """
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetUpdateOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetUpdateStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetUpdateNetworkProfile'},
        'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
        'user_data': {'key': 'userData', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetUpdateOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetUpdateStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetUpdateNetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base ``Model``."""
        super(VirtualMachineScaleSetUpdateVMProfile, self).__init__(**kwargs)
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.license_type = license_type
        self.billing_profile = billing_profile
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
class VirtualMachineScaleSetVM(Resource):
    """A single virtual machine inside a virtual machine scale set.

    Inherits ``id``/``name``/``type``/``location``/``tags`` from ``Resource``.
    ``location`` is the only required constructor argument. The attributes
    marked ``readonly`` in ``_validation`` (instance_id, sku, resources,
    zones, latest_model_applied, vm_id, instance_view, provisioning_state,
    model_definition_applied) are populated by the service and initialised
    to ``None`` here.
    """
    # Constraints consumed by the msrest serializer/validator.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'instance_id': {'readonly': True},
        'sku': {'readonly': True},
        'resources': {'readonly': True},
        'zones': {'readonly': True},
        'latest_model_applied': {'readonly': True},
        'vm_id': {'readonly': True},
        'instance_view': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'model_definition_applied': {'readonly': True},
    }
    # Maps each Python attribute to its REST JSON key (note the nested
    # 'properties.*' keys) and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineScaleSetVMInstanceView'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'network_profile_configuration': {'key': 'properties.networkProfileConfiguration', 'type': 'VirtualMachineScaleSetVMNetworkProfileConfiguration'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'model_definition_applied': {'key': 'properties.modelDefinitionApplied', 'type': 'str'},
        'protection_policy': {'key': 'properties.protectionPolicy', 'type': 'VirtualMachineScaleSetVMProtectionPolicy'},
        'user_data': {'key': 'properties.userData', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        network_profile_configuration: Optional["VirtualMachineScaleSetVMNetworkProfileConfiguration"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        protection_policy: Optional["VirtualMachineScaleSetVMProtectionPolicy"] = None,
        user_data: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor.

        :keyword location: Required. Resource location.
        """
        super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags, **kwargs)
        self.instance_id = None  # read-only; server-populated
        self.sku = None  # read-only; server-populated
        self.plan = plan
        self.resources = None  # read-only; server-populated
        self.zones = None  # read-only; server-populated
        self.identity = identity
        self.latest_model_applied = None  # read-only; server-populated
        self.vm_id = None  # read-only; server-populated
        self.instance_view = None  # read-only; server-populated
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.security_profile = security_profile
        self.network_profile = network_profile
        self.network_profile_configuration = network_profile_configuration
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.provisioning_state = None  # read-only; server-populated
        self.license_type = license_type
        self.model_definition_applied = None  # read-only; server-populated
        self.protection_policy = protection_policy
        self.user_data = user_data
class VirtualMachineScaleSetVMExtension(SubResourceReadOnly):
    """An extension attached to a virtual machine in a scale set.

    ``id`` (inherited), ``name``, ``type`` and ``provisioning_state`` are
    read-only and populated by the service. ``type_properties_type`` is the
    extension type under the JSON ``properties.type`` key, distinct from the
    resource-level ``type``.
    """
    # Constraints consumed by the msrest serializer/validator.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        instance_view: Optional["VirtualMachineExtensionInstanceView"] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base class."""
        super(VirtualMachineScaleSetVMExtension, self).__init__(**kwargs)
        self.name = None  # read-only; server-populated
        self.type = None  # read-only; server-populated
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.provisioning_state = None  # read-only; server-populated
        self.instance_view = instance_view
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetVMExtensionsListResult(msrest.serialization.Model):
    """List-result wrapper around the extensions of a scale-set VM."""
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVMExtension]'},
    }
    def __init__(self, *, value: Optional[List["VirtualMachineScaleSetVMExtension"]] = None, **kwargs):
        """:keyword value: The extension models contained in the response."""
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineScaleSetVMExtensionsSummary(msrest.serialization.Model):
    """Read-only extensions summary for scale-set VMs.

    Both attributes are marked ``readonly`` in ``_validation``: the service
    fills them in and any client-supplied values are ignored.
    """
    _validation = {
        'name': {'readonly': True},
        'statuses_summary': {'readonly': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
    }
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; initialised empty on the client side.
        self.statuses_summary = None
        self.name = None
class VirtualMachineScaleSetVMExtensionUpdate(SubResourceReadOnly):
    """Updatable subset of a scale-set VM extension.

    Mirrors ``VirtualMachineScaleSetVMExtension`` minus the server-only
    fields (``provisioning_state``, ``instance_view``). ``id`` (inherited),
    ``name`` and ``type`` are read-only.
    """
    # Constraints consumed by the msrest serializer/validator.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
        'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'},
        'settings': {'key': 'properties.settings', 'type': 'object'},
        'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
        'suppress_failures': {'key': 'properties.suppressFailures', 'type': 'bool'},
        'protected_settings_from_key_vault': {'key': 'properties.protectedSettingsFromKeyVault', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        force_update_tag: Optional[str] = None,
        publisher: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        type_handler_version: Optional[str] = None,
        auto_upgrade_minor_version: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        settings: Optional[Any] = None,
        protected_settings: Optional[Any] = None,
        suppress_failures: Optional[bool] = None,
        protected_settings_from_key_vault: Optional[Any] = None,
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base class."""
        super(VirtualMachineScaleSetVMExtensionUpdate, self).__init__(**kwargs)
        self.name = None  # read-only; server-populated
        self.type = None  # read-only; server-populated
        self.force_update_tag = force_update_tag
        self.publisher = publisher
        self.type_properties_type = type_properties_type
        self.type_handler_version = type_handler_version
        self.auto_upgrade_minor_version = auto_upgrade_minor_version
        self.enable_automatic_upgrade = enable_automatic_upgrade
        self.settings = settings
        self.protected_settings = protected_settings
        self.suppress_failures = suppress_failures
        self.protected_settings_from_key_vault = protected_settings_from_key_vault
class VirtualMachineScaleSetVMInstanceIDs(msrest.serialization.Model):
    """Wrapper model for an optional list of scale-set VM instance IDs."""
    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }
    def __init__(self, *, instance_ids: Optional[List[str]] = None, **kwargs):
        """:keyword instance_ids: Instance IDs for the request; ``None`` leaves it unspecified."""
        super().__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceRequiredIDs(msrest.serialization.Model):
    """Wrapper model for a mandatory list of scale-set VM instance IDs."""
    # instance_ids must be supplied; the serializer enforces this.
    _validation = {
        'instance_ids': {'required': True},
    }
    _attribute_map = {
        'instance_ids': {'key': 'instanceIds', 'type': '[str]'},
    }
    def __init__(self, *, instance_ids: List[str], **kwargs):
        """:keyword instance_ids: Required. The instance IDs the request applies to."""
        super().__init__(**kwargs)
        self.instance_ids = instance_ids
class VirtualMachineScaleSetVMInstanceView(msrest.serialization.Model):
    """Runtime instance view of a virtual machine in a scale set.

    ``vm_health`` and ``assigned_host`` are read-only and populated by the
    service; every other attribute is an optional constructor keyword.
    """
    # Constraints consumed by the msrest serializer/validator.
    _validation = {
        'vm_health': {'readonly': True},
        'assigned_host': {'readonly': True},
    }
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
        'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
        'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
        'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
        'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
        'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
        'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
        'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
        'assigned_host': {'key': 'assignedHost', 'type': 'str'},
        'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        platform_update_domain: Optional[int] = None,
        platform_fault_domain: Optional[int] = None,
        rdp_thumb_print: Optional[str] = None,
        vm_agent: Optional["VirtualMachineAgentInstanceView"] = None,
        maintenance_redeploy_status: Optional["MaintenanceRedeployStatus"] = None,
        disks: Optional[List["DiskInstanceView"]] = None,
        extensions: Optional[List["VirtualMachineExtensionInstanceView"]] = None,
        boot_diagnostics: Optional["BootDiagnosticsInstanceView"] = None,
        statuses: Optional[List["InstanceViewStatus"]] = None,
        placement_group_id: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base ``Model``."""
        super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
        self.platform_update_domain = platform_update_domain
        self.platform_fault_domain = platform_fault_domain
        self.rdp_thumb_print = rdp_thumb_print
        self.vm_agent = vm_agent
        self.maintenance_redeploy_status = maintenance_redeploy_status
        self.disks = disks
        self.extensions = extensions
        self.vm_health = None  # read-only; server-populated
        self.boot_diagnostics = boot_diagnostics
        self.statuses = statuses
        self.assigned_host = None  # read-only; server-populated
        self.placement_group_id = placement_group_id
class VirtualMachineScaleSetVMListResult(msrest.serialization.Model):
    """One page of scale-set VMs plus the link to the next page, if any."""
    # value must be supplied; the serializer enforces this.
    _validation = {
        'value': {'required': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineScaleSetVM]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(self, *, value: List["VirtualMachineScaleSetVM"], next_link: Optional[str] = None, **kwargs):
        """:keyword value: Required. The VMs contained in this page.
        :keyword next_link: Link to fetch the next page; ``None`` when absent.
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VirtualMachineScaleSetVMNetworkProfileConfiguration(msrest.serialization.Model):
    """Network-profile configuration for a single scale-set VM."""
    _attribute_map = {
        'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
    }
    def __init__(
        self,
        *,
        network_interface_configurations: Optional[List["VirtualMachineScaleSetNetworkConfiguration"]] = None,
        **kwargs
    ):
        """:keyword network_interface_configurations: The NIC configurations, if any."""
        super().__init__(**kwargs)
        self.network_interface_configurations = network_interface_configurations
class VirtualMachineScaleSetVMProfile(msrest.serialization.Model):
    """The VM profile applied to virtual machines created in a scale set.

    Aggregates the OS/storage/network/security/diagnostics/extension
    profiles plus licensing, priority, eviction, billing, scheduled-events,
    user-data, capacity-reservation, application and hardware settings.
    All attributes are optional.
    """
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
        'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'str'},
        'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'billingProfile', 'type': 'BillingProfile'},
        'scheduled_events_profile': {'key': 'scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
        'user_data': {'key': 'userData', 'type': 'str'},
        'capacity_reservation': {'key': 'capacityReservation', 'type': 'CapacityReservationProfile'},
        'application_profile': {'key': 'applicationProfile', 'type': 'ApplicationProfile'},
        'hardware_profile': {'key': 'hardwareProfile', 'type': 'VirtualMachineScaleSetHardwareProfile'},
    }
    def __init__(
        self,
        *,
        os_profile: Optional["VirtualMachineScaleSetOSProfile"] = None,
        storage_profile: Optional["VirtualMachineScaleSetStorageProfile"] = None,
        network_profile: Optional["VirtualMachineScaleSetNetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        extension_profile: Optional["VirtualMachineScaleSetExtensionProfile"] = None,
        license_type: Optional[str] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,  # enum or its string value
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,  # enum or its string value
        billing_profile: Optional["BillingProfile"] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        capacity_reservation: Optional["CapacityReservationProfile"] = None,
        application_profile: Optional["ApplicationProfile"] = None,
        hardware_profile: Optional["VirtualMachineScaleSetHardwareProfile"] = None,
        **kwargs
    ):
        """Keyword-only constructor; extra ``kwargs`` are forwarded to the base ``Model``."""
        super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
        self.os_profile = os_profile
        self.storage_profile = storage_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.extension_profile = extension_profile
        self.license_type = license_type
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
        self.capacity_reservation = capacity_reservation
        self.application_profile = application_profile
        self.hardware_profile = hardware_profile
class VirtualMachineScaleSetVMProtectionPolicy(msrest.serialization.Model):
    """Protection-policy flags for a scale-set VM."""
    _attribute_map = {
        'protect_from_scale_in': {'key': 'protectFromScaleIn', 'type': 'bool'},
        'protect_from_scale_set_actions': {'key': 'protectFromScaleSetActions', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        protect_from_scale_in: Optional[bool] = None,
        protect_from_scale_set_actions: Optional[bool] = None,
        **kwargs
    ):
        """Both flags are optional booleans; ``None`` leaves the setting unspecified."""
        super().__init__(**kwargs)
        self.protect_from_scale_set_actions = protect_from_scale_set_actions
        self.protect_from_scale_in = protect_from_scale_in
class VirtualMachineSize(msrest.serialization.Model):
    """Hardware characteristics of one VM size: cores, memory and disk limits."""
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'number_of_cores': {'key': 'numberOfCores', 'type': 'int'},
        'os_disk_size_in_mb': {'key': 'osDiskSizeInMB', 'type': 'int'},
        'resource_disk_size_in_mb': {'key': 'resourceDiskSizeInMB', 'type': 'int'},
        'memory_in_mb': {'key': 'memoryInMB', 'type': 'int'},
        'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'int'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        number_of_cores: Optional[int] = None,
        os_disk_size_in_mb: Optional[int] = None,
        resource_disk_size_in_mb: Optional[int] = None,
        memory_in_mb: Optional[int] = None,
        max_data_disk_count: Optional[int] = None,
        **kwargs
    ):
        """All fields are optional keyword arguments; disk/memory sizes carry MB units per the JSON keys."""
        super().__init__(**kwargs)
        # Straight keyword-to-attribute copies; order is irrelevant.
        self.max_data_disk_count = max_data_disk_count
        self.memory_in_mb = memory_in_mb
        self.resource_disk_size_in_mb = resource_disk_size_in_mb
        self.os_disk_size_in_mb = os_disk_size_in_mb
        self.number_of_cores = number_of_cores
        self.name = name
class VirtualMachineSizeListResult(msrest.serialization.Model):
    """List-result wrapper around the available VM sizes."""
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineSize]'},
    }
    def __init__(self, *, value: Optional[List["VirtualMachineSize"]] = None, **kwargs):
        """:keyword value: The VM sizes contained in the response."""
        super().__init__(**kwargs)
        self.value = value
class VirtualMachineSoftwarePatchProperties(msrest.serialization.Model):
    """Metadata for a single available software patch on a VM.

    Every attribute is marked ``readonly`` in ``_validation``: the service
    populates them all, and client-supplied values are ignored.
    """
    _validation = {
        'patch_id': {'readonly': True},
        'name': {'readonly': True},
        'version': {'readonly': True},
        'kb_id': {'readonly': True},
        'classifications': {'readonly': True},
        'reboot_behavior': {'readonly': True},
        'activity_id': {'readonly': True},
        'published_date': {'readonly': True},
        'last_modified_date_time': {'readonly': True},
        'assessment_state': {'readonly': True},
    }
    _attribute_map = {
        'patch_id': {'key': 'patchId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'kb_id': {'key': 'kbId', 'type': 'str'},
        'classifications': {'key': 'classifications', 'type': '[str]'},
        'reboot_behavior': {'key': 'rebootBehavior', 'type': 'str'},
        'activity_id': {'key': 'activityId', 'type': 'str'},
        'published_date': {'key': 'publishedDate', 'type': 'iso-8601'},
        'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
        'assessment_state': {'key': 'assessmentState', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are server-populated; start every one of them as None.
        for _name in ('patch_id', 'name', 'version', 'kb_id', 'classifications',
                      'reboot_behavior', 'activity_id', 'published_date',
                      'last_modified_date_time', 'assessment_state'):
            setattr(self, _name, None)
class VirtualMachineStatusCodeCount(msrest.serialization.Model):
    """Read-only (code, count) pair for a VM status code.

    Both attributes are marked ``readonly`` in ``_validation`` and are
    populated only by the service.
    """
    _validation = {
        'code': {'readonly': True},
        'count': {'readonly': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
    }
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; initialised empty on the client side.
        self.count = None
        self.code = None
class VirtualMachineUpdate(UpdateResource):
    """Fields of a virtual machine that may be changed in an update (PATCH) request.

    Inherits ``tags`` handling from ``UpdateResource``. The attributes marked
    ``readonly`` in ``_validation`` (provisioning_state, instance_view, vm_id,
    time_created) are populated by the service and initialised to ``None``.
    """
    # Constraints consumed by the msrest serializer/validator.
    _validation = {
        'provisioning_state': {'readonly': True},
        'instance_view': {'readonly': True},
        'vm_id': {'readonly': True},
        'time_created': {'readonly': True},
    }
    # Maps each Python attribute to its REST JSON key (note the nested
    # 'properties.*' keys) and msrest type string.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'additional_capabilities': {'key': 'properties.additionalCapabilities', 'type': 'AdditionalCapabilities'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'security_profile': {'key': 'properties.securityProfile', 'type': 'SecurityProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'virtual_machine_scale_set': {'key': 'properties.virtualMachineScaleSet', 'type': 'SubResource'},
        'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'SubResource'},
        'priority': {'key': 'properties.priority', 'type': 'str'},
        'eviction_policy': {'key': 'properties.evictionPolicy', 'type': 'str'},
        'billing_profile': {'key': 'properties.billingProfile', 'type': 'BillingProfile'},
        'host': {'key': 'properties.host', 'type': 'SubResource'},
        'host_group': {'key': 'properties.hostGroup', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'extensions_time_budget': {'key': 'properties.extensionsTimeBudget', 'type': 'str'},
        'platform_fault_domain': {'key': 'properties.platformFaultDomain', 'type': 'int'},
        'scheduled_events_profile': {'key': 'properties.scheduledEventsProfile', 'type': 'ScheduledEventsProfile'},
        'user_data': {'key': 'properties.userData', 'type': 'str'},
        'capacity_reservation': {'key': 'properties.capacityReservation', 'type': 'CapacityReservationProfile'},
        'application_profile': {'key': 'properties.applicationProfile', 'type': 'ApplicationProfile'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        plan: Optional["Plan"] = None,
        identity: Optional["VirtualMachineIdentity"] = None,
        zones: Optional[List[str]] = None,
        hardware_profile: Optional["HardwareProfile"] = None,
        storage_profile: Optional["StorageProfile"] = None,
        additional_capabilities: Optional["AdditionalCapabilities"] = None,
        os_profile: Optional["OSProfile"] = None,
        network_profile: Optional["NetworkProfile"] = None,
        security_profile: Optional["SecurityProfile"] = None,
        diagnostics_profile: Optional["DiagnosticsProfile"] = None,
        availability_set: Optional["SubResource"] = None,
        virtual_machine_scale_set: Optional["SubResource"] = None,
        proximity_placement_group: Optional["SubResource"] = None,
        priority: Optional[Union[str, "VirtualMachinePriorityTypes"]] = None,
        eviction_policy: Optional[Union[str, "VirtualMachineEvictionPolicyTypes"]] = None,
        billing_profile: Optional["BillingProfile"] = None,
        host: Optional["SubResource"] = None,
        host_group: Optional["SubResource"] = None,
        license_type: Optional[str] = None,
        extensions_time_budget: Optional[str] = None,
        platform_fault_domain: Optional[int] = None,
        scheduled_events_profile: Optional["ScheduledEventsProfile"] = None,
        user_data: Optional[str] = None,
        capacity_reservation: Optional["CapacityReservationProfile"] = None,
        application_profile: Optional["ApplicationProfile"] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``tags`` is handled by the ``UpdateResource`` base."""
        super(VirtualMachineUpdate, self).__init__(tags=tags, **kwargs)
        self.plan = plan
        self.identity = identity
        self.zones = zones
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.additional_capabilities = additional_capabilities
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.security_profile = security_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.virtual_machine_scale_set = virtual_machine_scale_set
        self.proximity_placement_group = proximity_placement_group
        self.priority = priority
        self.eviction_policy = eviction_policy
        self.billing_profile = billing_profile
        self.host = host
        self.host_group = host_group
        self.provisioning_state = None  # read-only; server-populated
        self.instance_view = None  # read-only; server-populated
        self.license_type = license_type
        self.vm_id = None  # read-only; server-populated
        self.extensions_time_budget = extensions_time_budget
        self.platform_fault_domain = platform_fault_domain
        self.scheduled_events_profile = scheduled_events_profile
        self.user_data = user_data
        self.capacity_reservation = capacity_reservation
        self.application_profile = application_profile
        self.time_created = None  # read-only; server-populated
class VMDiskSecurityProfile(msrest.serialization.Model):
    """Security settings for a managed disk: encryption type plus the disk encryption set."""
    _attribute_map = {
        'security_encryption_type': {'key': 'securityEncryptionType', 'type': 'str'},
        'disk_encryption_set': {'key': 'diskEncryptionSet', 'type': 'DiskEncryptionSetParameters'},
    }
    def __init__(
        self,
        *,
        security_encryption_type: Optional[Union[str, "SecurityEncryptionTypes"]] = None,
        disk_encryption_set: Optional["DiskEncryptionSetParameters"] = None,
        **kwargs
    ):
        """:keyword security_encryption_type: Enum member or its string value; serialized as 'str'."""
        super().__init__(**kwargs)
        self.disk_encryption_set = disk_encryption_set
        self.security_encryption_type = security_encryption_type
class VMGalleryApplication(msrest.serialization.Model):
    """A gallery application to deploy on a virtual machine.

    ``package_reference_id`` is required; everything else is optional.
    Note that ``tags`` here is a plain ``str``, not the usual tag dict.
    """
    # package_reference_id must be supplied; the serializer enforces this.
    _validation = {
        'package_reference_id': {'required': True},
    }
    # Maps each Python attribute to its REST JSON key and msrest type string.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'package_reference_id': {'key': 'packageReferenceId', 'type': 'str'},
        'configuration_reference': {'key': 'configurationReference', 'type': 'str'},
        'treat_failure_as_deployment_failure': {'key': 'treatFailureAsDeploymentFailure', 'type': 'bool'},
        'enable_automatic_upgrade': {'key': 'enableAutomaticUpgrade', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        package_reference_id: str,
        tags: Optional[str] = None,
        order: Optional[int] = None,
        configuration_reference: Optional[str] = None,
        treat_failure_as_deployment_failure: Optional[bool] = None,
        enable_automatic_upgrade: Optional[bool] = None,
        **kwargs
    ):
        """Keyword-only constructor.

        :keyword package_reference_id: Required. Serialized as ``packageReferenceId``.
        """
        super(VMGalleryApplication, self).__init__(**kwargs)
        self.tags = tags
        self.order = order
        self.package_reference_id = package_reference_id
        self.configuration_reference = configuration_reference
        self.treat_failure_as_deployment_failure = treat_failure_as_deployment_failure
        self.enable_automatic_upgrade = enable_automatic_upgrade
class VmImagesInEdgeZoneListResult(msrest.serialization.Model):
    """Page of VM image resources in an edge zone, with an optional next-page link."""
    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualMachineImageResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: Optional[List["VirtualMachineImageResource"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """:keyword value: The image resources contained in this page.
        :keyword next_link: Link to fetch the next page; ``None`` when absent.
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class VMScaleSetConvertToSinglePlacementGroupInput(msrest.serialization.Model):
    """Request body for converting a scale set to a single placement group."""
    _attribute_map = {
        'active_placement_group_id': {'key': 'activePlacementGroupId', 'type': 'str'},
    }
    def __init__(self, *, active_placement_group_id: Optional[str] = None, **kwargs):
        """:keyword active_placement_group_id: Serialized as ``activePlacementGroupId``."""
        super().__init__(**kwargs)
        self.active_placement_group_id = active_placement_group_id
class VMSizeProperties(msrest.serialization.Model):
    """vCPU sizing knobs: vCPUs available and vCPUs per core."""
    _attribute_map = {
        'v_cpus_available': {'key': 'vCPUsAvailable', 'type': 'int'},
        'v_cpus_per_core': {'key': 'vCPUsPerCore', 'type': 'int'},
    }
    def __init__(self, *, v_cpus_available: Optional[int] = None, v_cpus_per_core: Optional[int] = None, **kwargs):
        """Both fields are optional integers; ``None`` leaves them unspecified."""
        super().__init__(**kwargs)
        self.v_cpus_per_core = v_cpus_per_core
        self.v_cpus_available = v_cpus_available
class WindowsConfiguration(msrest.serialization.Model):
    """Serialization model for Windows guest OS settings.

    Every field is optional and is simply stored on the instance;
    ``_attribute_map`` drives msrest (de)serialization.
    """

    _attribute_map = {
        'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
        'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
        'patch_settings': {'key': 'patchSettings', 'type': 'PatchSettings'},
        'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
    }

    def __init__(self, *,
                 provision_vm_agent: Optional[bool] = None,
                 enable_automatic_updates: Optional[bool] = None,
                 time_zone: Optional[str] = None,
                 additional_unattend_content: Optional[List["AdditionalUnattendContent"]] = None,
                 patch_settings: Optional["PatchSettings"] = None,
                 win_rm: Optional["WinRMConfiguration"] = None,
                 **kwargs):
        """Store each optional setting; extra kwargs go to the msrest base."""
        super(WindowsConfiguration, self).__init__(**kwargs)
        self.provision_vm_agent = provision_vm_agent
        self.enable_automatic_updates = enable_automatic_updates
        self.time_zone = time_zone
        self.additional_unattend_content = additional_unattend_content
        self.patch_settings = patch_settings
        self.win_rm = win_rm
class WindowsParameters(msrest.serialization.Model):
    """Serialization model for Windows patch-operation parameters.

    All fields are optional selection criteria (classifications, KB
    numbers, reboot exclusion, publish-date cutoff) stored verbatim.
    """

    _attribute_map = {
        'classifications_to_include': {'key': 'classificationsToInclude', 'type': '[str]'},
        'kb_numbers_to_include': {'key': 'kbNumbersToInclude', 'type': '[str]'},
        'kb_numbers_to_exclude': {'key': 'kbNumbersToExclude', 'type': '[str]'},
        'exclude_kbs_requiring_reboot': {'key': 'excludeKbsRequiringReboot', 'type': 'bool'},
        'max_patch_publish_date': {'key': 'maxPatchPublishDate', 'type': 'iso-8601'},
    }

    def __init__(self, *,
                 classifications_to_include: Optional[List[Union[str, "VMGuestPatchClassificationWindows"]]] = None,
                 kb_numbers_to_include: Optional[List[str]] = None,
                 kb_numbers_to_exclude: Optional[List[str]] = None,
                 exclude_kbs_requiring_reboot: Optional[bool] = None,
                 max_patch_publish_date: Optional[datetime.datetime] = None,
                 **kwargs):
        """Store the optional values; extra kwargs go to the msrest base."""
        super(WindowsParameters, self).__init__(**kwargs)
        self.classifications_to_include = classifications_to_include
        self.kb_numbers_to_include = kb_numbers_to_include
        self.kb_numbers_to_exclude = kb_numbers_to_exclude
        self.exclude_kbs_requiring_reboot = exclude_kbs_requiring_reboot
        self.max_patch_publish_date = max_patch_publish_date
class WindowsVMGuestPatchAutomaticByPlatformSettings(msrest.serialization.Model):
    """Serialization model carrying a single optional ``rebootSetting``
    value.
    """

    _attribute_map = {
        'reboot_setting': {'key': 'rebootSetting', 'type': 'str'},
    }

    def __init__(self, *,
                 reboot_setting: Optional[Union[str, "WindowsVMGuestPatchAutomaticByPlatformRebootSetting"]] = None,
                 **kwargs):
        """Store the optional value; extra kwargs go to the msrest base."""
        super(WindowsVMGuestPatchAutomaticByPlatformSettings, self).__init__(**kwargs)
        self.reboot_setting = reboot_setting
class WinRMConfiguration(msrest.serialization.Model):
    """Serialization model holding an optional list of ``WinRMListener``
    entries under the wire key ``listeners``.
    """

    _attribute_map = {
        'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
    }

    def __init__(self, *,
                 listeners: Optional[List["WinRMListener"]] = None,
                 **kwargs):
        """Store the optional listener list; extra kwargs go to the base."""
        super(WinRMConfiguration, self).__init__(**kwargs)
        self.listeners = listeners
class WinRMListener(msrest.serialization.Model):
    """Serialization model for a single WinRM listener: an optional
    protocol and an optional certificate URL.
    """

    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
    }

    def __init__(self, *,
                 protocol: Optional[Union[str, "ProtocolTypes"]] = None,
                 certificate_url: Optional[str] = None,
                 **kwargs):
        """Store the optional values; extra kwargs go to the msrest base."""
        super(WinRMListener, self).__init__(**kwargs)
        self.protocol = protocol
        self.certificate_url = certificate_url
| true | true |
f7fc767435f6a2472d95d1126b394b806c578b4c | 127,306 | py | Python | gear/__init__.py | drifterza/gear | 9933f068213c9da6c4b0ea59659c13bcf2ebe104 | [
"Apache-2.0"
] | null | null | null | gear/__init__.py | drifterza/gear | 9933f068213c9da6c4b0ea59659c13bcf2ebe104 | [
"Apache-2.0"
] | null | null | null | gear/__init__.py | drifterza/gear | 9933f068213c9da6c4b0ea59659c13bcf2ebe104 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
import os
import select
import six
import socket
import ssl
import struct
import threading
import time
import uuid as uuid_module
from gear import constants
from gear.acl import ACLError, ACLEntry, ACL # noqa
try:
import Queue as queue_mod
except ImportError:
import queue as queue_mod
try:
import statsd
except ImportError:
statsd = None
# Job precedence levels. NOTE(review): presumably consumed by the
# job-submission API (outside this view) to pick between the normal,
# low and high priority SUBMIT_JOB_* packet variants -- confirm in the
# client code.
PRECEDENCE_NORMAL = 0
PRECEDENCE_LOW = 1
PRECEDENCE_HIGH = 2
# NOTE(review): ConnectionError, InterruptedError and TimeoutError shadow
# the Python 3 builtins of the same names; importers of this module get
# these gear-specific types instead.


class ConnectionError(Exception):
    """Raised when a socket to a Gearman server cannot be opened."""


class InvalidDataError(Exception):
    """Raised when malformed or unexpected data is encountered."""


class ConfigurationError(Exception):
    """Error type for configuration problems."""


class NoConnectedServersError(Exception):
    """Error type for operations attempted with no server connection."""


class UnknownJobError(Exception):
    """Raised when a packet references a job handle that is not known."""


class InterruptedError(Exception):
    """Error type for interrupted operations."""


class TimeoutError(Exception):
    """Raised when a response does not arrive within the allowed time."""


class GearmanError(Exception):
    """Generic error reported by the Gearman protocol machinery."""


class DisconnectError(Exception):
    """Error type associated with a lost connection."""


class RetryIOError(Exception):
    """Raised to indicate an I/O operation should be retried."""
def convert_to_bytes(data):
    """Return *data* encoded as UTF-8 when it supports encoding.

    Objects without an ``encode`` method (already-encoded bytes, ints,
    None, ...) are returned unchanged.
    """
    try:
        return data.encode('utf8')
    except AttributeError:
        return data
class Task(object):
    """Tracks completion of a single outstanding request to Gearman.

    The I/O machinery marks the task finished via :py:meth:`setComplete`;
    the caller blocks on :py:meth:`wait`.
    """

    def __init__(self):
        # Set once the server has acknowledged the request.
        self._wait_event = threading.Event()

    def setComplete(self):
        """Mark the task finished and release any waiting threads."""
        self._wait_event.set()

    def wait(self, timeout=None):
        """Wait for a response from Gearman.

        :arg int timeout: If not None, return after this many seconds if no
            response has been received (default: None).
        :returns: True if the response arrived, False on timeout.
        """
        self._wait_event.wait(timeout)
        return self._wait_event.is_set()
class SubmitJobTask(Task):
    """Task tracking a job submission; remembers the submitted job so the
    JOB_CREATED response can be matched back to it.
    """

    def __init__(self, job):
        super(SubmitJobTask, self).__init__()
        # The job object this submission is for.
        self.job = job
class OptionReqTask(Task):
    """Task tracking an OPTION_REQ request; adds no state of its own."""
class Connection(object):
    """A Connection to a Gearman Server.

    :arg str client_id: The client ID associated with this connection.
        It will be appended to the name of the logger (e.g.,
        gear.Connection.client_id).  Defaults to 'unknown'.
    :arg bool keepalive: Whether to use TCP keepalives
    :arg int tcp_keepidle: Idle time after which to start keepalives sending
    :arg int tcp_keepintvl: Interval in seconds between TCP keepalives
    :arg int tcp_keepcnt: Count of TCP keepalives to send before disconnect
    """

    def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None,
                 client_id='unknown', keepalive=False, tcp_keepidle=7200,
                 tcp_keepintvl=75, tcp_keepcnt=9):
        self.log = logging.getLogger("gear.Connection.%s" % (client_id,))
        self.host = host
        self.port = port
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.ssl_ca = ssl_ca
        self.keepalive = keepalive
        self.tcp_keepcnt = tcp_keepcnt
        self.tcp_keepintvl = tcp_keepintvl
        self.tcp_keepidle = tcp_keepidle

        # SSL is enabled only when all three credential files are supplied.
        self.use_ssl = False
        if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
            self.use_ssl = True

        # Bytes read from the socket but not yet parsed, and a flag
        # recording that the parser needs more input to make progress.
        self.input_buffer = b''
        self.need_bytes = False
        # Protects echo_conditions.
        self.echo_lock = threading.Lock()
        # Serializes writes so concurrent senders cannot interleave packets.
        self.send_lock = threading.Lock()
        self._init()

    def _init(self):
        """(Re)set all per-connection state; called on construction and
        after every disconnect.
        """
        self.conn = None
        self.connected = False
        self.connect_time = None
        # handle -> Job for jobs associated with this connection.
        self.related_jobs = {}
        self.pending_tasks = []
        # FIFO of AdminRequests awaiting their responses.
        self.admin_requests = []
        # echo payload -> Condition for in-flight echo requests.
        self.echo_conditions = {}
        self.options = set()
        self.changeState("INIT")

    def changeState(self, state):
        # The state variables are provided as a convenience (and used by
        # the Worker implementation). They aren't used or modified within
        # the connection object itself except to reset to "INIT" immediately
        # after reconnection.
        self.log.debug("Setting state to: %s" % state)
        self.state = state
        self.state_time = time.time()

    def __repr__(self):
        return '<gear.Connection 0x%x host: %s port: %s>' % (
            id(self), self.host, self.port)

    def connect(self):
        """Open a connection to the server.

        :raises ConnectionError: If unable to open the socket.
        """
        self.log.debug("Connecting to %s port %s" % (self.host, self.port))
        s = None
        # Try each resolved address (IPv4/IPv6) until one connects.
        for res in socket.getaddrinfo(self.host, self.port,
                                      socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
                if self.keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
                    s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                 self.tcp_keepidle)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                                 self.tcp_keepintvl)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT,
                                 self.tcp_keepcnt)
                elif self.keepalive:
                    self.log.warning('Keepalive requested but not available '
                                     'on this platform')
            except socket.error:
                s = None
                continue
            if self.use_ssl:
                self.log.debug("Using SSL")
                # NOTE(review): PROTOCOL_TLSv1 pins TLS 1.0, which modern
                # OpenSSL builds may refuse; consider PROTOCOL_TLS_CLIENT.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_REQUIRED
                context.check_hostname = False
                context.load_cert_chain(self.ssl_cert, self.ssl_key)
                context.load_verify_locations(self.ssl_ca)
                s = context.wrap_socket(s, server_hostname=self.host)
            try:
                s.connect(sa)
            except socket.error:
                s.close()
                s = None
                continue
            break
        if s is None:
            self.log.debug("Error connecting to %s port %s" % (
                self.host, self.port))
            raise ConnectionError("Unable to open socket")
        self.log.info("Connected to %s port %s" % (self.host, self.port))
        self.conn = s
        self.connected = True
        self.connect_time = time.time()
        self.input_buffer = b''
        self.need_bytes = False

    def disconnect(self):
        """Disconnect from the server and remove all associated state
        data.
        """
        if self.conn:
            try:
                self.conn.close()
            except Exception:
                pass
        self.log.info("Disconnected from %s port %s" % (self.host, self.port))
        self._init()

    def reconnect(self):
        """Disconnect from and reconnect to the server, removing all
        associated state data.
        """
        self.disconnect()
        self.connect()

    def sendRaw(self, data):
        """Send raw data over the socket.

        :arg bytes data: The raw data to send
        """
        with self.send_lock:
            sent = 0
            while sent < len(data):
                try:
                    # Only send the unsent tail; re-sending the whole
                    # buffer after a short write would duplicate bytes.
                    sent += self.conn.send(data[sent:])
                except ssl.SSLError as e:
                    # Non-blocking SSL sockets ask for the operation to be
                    # repeated while renegotiation traffic completes.
                    if e.errno == ssl.SSL_ERROR_WANT_READ:
                        continue
                    elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                        continue
                    else:
                        raise

    def sendPacket(self, packet):
        """Send a packet to the server.

        :arg Packet packet: The :py:class:`Packet` to send.
        """
        self.log.info("Sending packet to %s: %s" % (self, packet))
        self.sendRaw(packet.toBinary())

    def _getAdminRequest(self):
        # Pop the oldest outstanding admin request; responses arrive in
        # the order the requests were sent.
        return self.admin_requests.pop(0)

    def _readRawBytes(self, bytes_to_read):
        # Read from the socket, transparently retrying SSL
        # renegotiation conditions.
        while True:
            try:
                buff = self.conn.recv(bytes_to_read)
            except ssl.SSLError as e:
                if e.errno == ssl.SSL_ERROR_WANT_READ:
                    continue
                elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                    continue
                else:
                    raise
            break
        return buff

    def _putAdminRequest(self, req):
        # Return an admin request to the head of the queue (used when a
        # read must be retried before the response is complete).
        self.admin_requests.insert(0, req)

    def readPacket(self):
        """Read one packet or administrative response from the server.

        :returns: The :py:class:`Packet` or :py:class:`AdminRequest` read,
            or None if the connection was closed.
        :rtype: :py:class:`Packet` or :py:class:`AdminRequest`
        """
        # This handles non-blocking or blocking IO.
        datalen = 0
        code = None
        ptype = None
        admin = None
        admin_request = None
        need_bytes = self.need_bytes
        raw_bytes = self.input_buffer
        try:
            while True:
                try:
                    if not raw_bytes or need_bytes:
                        segment = self._readRawBytes(4096)
                        if not segment:
                            # This occurs when the connection is closed.
                            # The connect method will reset input_buffer
                            # and need_bytes for us.
                            return None
                        raw_bytes += segment
                        need_bytes = False
                except RetryIOError:
                    if admin_request:
                        # Restore the request so the retried read pops
                        # it again.
                        self._putAdminRequest(admin_request)
                    raise
                if admin is None:
                    # Binary packets always start with a NUL magic byte;
                    # anything else is a text (admin) protocol response.
                    if raw_bytes[0:1] == b'\x00':
                        admin = False
                    else:
                        admin = True
                        admin_request = self._getAdminRequest()
                if admin:
                    complete, remainder = admin_request.isComplete(raw_bytes)
                    if remainder is not None:
                        raw_bytes = remainder
                    if complete:
                        return admin_request
                else:
                    length = len(raw_bytes)
                    if code is None and length >= 12:
                        # 12-byte header: 4-byte magic, packet type,
                        # data length.
                        code, ptype, datalen = struct.unpack('!4sii',
                                                             raw_bytes[:12])
                    if length >= datalen + 12:
                        end = 12 + datalen
                        p = Packet(code, ptype, raw_bytes[12:end],
                                   connection=self)
                        raw_bytes = raw_bytes[end:]
                        return p
                # If we don't return a packet above then we need more data
                need_bytes = True
        finally:
            # Persist parser state across calls (and across RetryIOError).
            self.input_buffer = raw_bytes
            self.need_bytes = need_bytes

    def hasPendingData(self):
        """Return True when bytes already read from the socket remain
        unparsed.
        """
        return self.input_buffer != b''

    def sendAdminRequest(self, request, timeout=90):
        """Send an administrative request to the server.

        :arg AdminRequest request: The :py:class:`AdminRequest` to send.
        :arg numeric timeout: Number of seconds to wait until the response
            is received.  If None, wait forever (default: 90 seconds).
        :raises TimeoutError: If the timeout is reached before the response
            is received.
        """
        self.admin_requests.append(request)
        self.sendRaw(request.getCommand())
        complete = request.waitForResponse(timeout)
        if not complete:
            raise TimeoutError()

    def echo(self, data=None, timeout=30):
        """Perform an echo test on the server.

        This method waits until the echo response has been received or the
        timeout has been reached.

        :arg bytes data: The data to request be echoed.  If None, a random
            unique byte string will be generated.
        :arg numeric timeout: Number of seconds to wait until the response
            is received.  If None, wait forever (default: 30 seconds).
        :raises TimeoutError: If the timeout is reached before the response
            is received.
        """
        if data is None:
            data = uuid_module.uuid4().hex.encode('utf8')
        self.echo_lock.acquire()
        try:
            if data in self.echo_conditions:
                raise InvalidDataError("This client is already waiting on an "
                                       "echo response of: %s" % data)
            condition = threading.Condition()
            self.echo_conditions[data] = condition
        finally:
            self.echo_lock.release()

        self.sendEchoReq(data)

        condition.acquire()
        condition.wait(timeout)
        condition.release()

        # handleEchoRes removes the entry when the response arrives, so
        # a surviving entry means the wait timed out.  (The previous
        # version had these branches inverted and also leaked the entry.)
        self.echo_lock.acquire()
        try:
            timed_out = data in self.echo_conditions
            if timed_out:
                del self.echo_conditions[data]
        finally:
            self.echo_lock.release()
        if timed_out:
            raise TimeoutError()
        return data

    def sendEchoReq(self, data):
        """Send an ECHO_REQ packet; pairing with the response happens in
        :py:meth:`handleEchoRes` via the echo_conditions map.
        """
        p = Packet(constants.REQ, constants.ECHO_REQ, data)
        self.sendPacket(p)

    def handleEchoRes(self, data):
        """Wake the thread waiting for an echo of *data*.

        :returns: False when no waiter was registered for this payload,
            True otherwise.
        """
        condition = None
        self.echo_lock.acquire()
        try:
            condition = self.echo_conditions.get(data)
            if condition:
                del self.echo_conditions[data]
        finally:
            self.echo_lock.release()

        if not condition:
            return False
        # A Condition must be acquired before notifying; notifying an
        # un-acquired condition raises RuntimeError.
        condition.acquire()
        condition.notifyAll()
        condition.release()
        return True

    def handleOptionRes(self, option):
        """Record a server-acknowledged connection option."""
        self.options.add(option)
class AdminRequest(object):
    """Encapsulates a request (and response) sent over the
    administrative protocol.  This is a base class that may not be
    instantiated directly; a subclass implementing a specific command
    must be used instead.

    :arg list arguments: A list of byte string arguments for the command.

    The following instance attributes are available:

    **response** (bytes)
        The response from the server.
    **arguments** (bytes)
        The argument supplied with the constructor.
    **command** (bytes)
        The administrative command.
    """

    command = None
    arguments = []
    response = None
    # Length of the response buffer already scanned for the terminator;
    # lets isComplete avoid rescanning on incremental reads.
    _complete_position = 0

    def __init__(self, *arguments):
        self.wait_event = threading.Event()
        self.arguments = arguments
        if type(self) == AdminRequest:
            raise NotImplementedError("AdminRequest must be subclassed")

    def __repr__(self):
        return '<gear.AdminRequest 0x%x command: %s>' % (
            id(self), self.command)

    def getCommand(self):
        """Return the full command line to send to the server.

        :returns: The command, any arguments, and a trailing newline.
        :rtype: bytes
        """
        cmd = self.command
        if self.arguments:
            cmd += b' ' + b' '.join(self.arguments)
        cmd += b'\n'
        return cmd

    def isComplete(self, data):
        """Check whether *data* holds a complete response.

        :arg bytes data: All response bytes received so far.
        :returns: A tuple of (complete, remainder); *remainder* is the
            bytes following the response when complete, else None.
        """
        x = -1
        # Resume scanning shortly before where the last scan stopped so a
        # terminator split across reads is still found; the longest
        # terminator is five bytes, so four bytes of overlap suffice.
        # (A redundant dead assignment to `start` was removed here.)
        start = max(self._complete_position - 4, 0)
        end_index_newline = data.find(b'\n.\n', start)
        end_index_return = data.find(b'\r\n.\r\n', start)
        if end_index_newline != -1:
            x = end_index_newline + 3
        elif end_index_return != -1:
            x = end_index_return + 5
        elif data.startswith(b'.\n'):
            x = 2
        elif data.startswith(b'.\r\n'):
            x = 3
        self._complete_position = len(data)
        if x != -1:
            self.response = data[:x]
            return (True, data[x:])
        else:
            return (False, None)

    def setComplete(self):
        """Mark the response as received and release waiters."""
        self.wait_event.set()

    def waitForResponse(self, timeout=None):
        """Block until the response arrives or *timeout* elapses.

        :returns: True if the response was received, False on timeout.
        """
        self.wait_event.wait(timeout)
        return self.wait_event.is_set()
class StatusAdminRequest(AdminRequest):
    """A "status" administrative request.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'status'

    def __init__(self):
        # The status command takes no arguments.
        super(StatusAdminRequest, self).__init__()
class ShowJobsAdminRequest(AdminRequest):
    """A "show jobs" administrative request.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'show jobs'

    def __init__(self):
        # The show jobs command takes no arguments.
        super(ShowJobsAdminRequest, self).__init__()
class ShowUniqueJobsAdminRequest(AdminRequest):
    """A "show unique jobs" administrative request.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'show unique jobs'

    def __init__(self):
        # The show unique jobs command takes no arguments.
        super(ShowUniqueJobsAdminRequest, self).__init__()
class CancelJobAdminRequest(AdminRequest):
    """A "cancel job" administrative request.

    :arg str handle: The job handle to be canceled.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'cancel job'

    def __init__(self, handle):
        handle = convert_to_bytes(handle)
        super(CancelJobAdminRequest, self).__init__(handle)

    def isComplete(self, data):
        # The cancel response is a single line; it is complete as soon as
        # one newline has been received.
        newline_index = data.find(b'\n')
        if newline_index == -1:
            return (False, None)
        cut = newline_index + 1
        self.response = data[:cut]
        return (True, data[cut:])
class VersionAdminRequest(AdminRequest):
    """A "version" administrative request.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'version'

    def __init__(self):
        # The version command takes no arguments.
        super(VersionAdminRequest, self).__init__()

    def isComplete(self, data):
        # The version response is a single line terminated by the first
        # newline.
        newline_index = data.find(b'\n')
        if newline_index == -1:
            return (False, None)
        cut = newline_index + 1
        self.response = data[:cut]
        return (True, data[cut:])
class WorkersAdminRequest(AdminRequest):
    """A "workers" administrative request.

    The response from gearman may be found in the **response** attribute.
    """

    command = b'workers'

    def __init__(self):
        # The workers command takes no arguments.
        super(WorkersAdminRequest, self).__init__()
class Packet(object):
    """A data packet received from or to be sent over a
    :py:class:`Connection`.

    :arg bytes code: The Gearman magic code (:py:data:`constants.REQ` or
        :py:data:`constants.RES`)
    :arg bytes ptype: The packet type (one of the packet types in
        constants).
    :arg bytes data: The data portion of the packet.
    :arg Connection connection: The connection on which the packet
        was received (optional).
    :raises InvalidDataError: If the magic code is unknown.
    """

    def __init__(self, code, ptype, data, connection=None):
        if not isinstance(code, bytes) and not isinstance(code, bytearray):
            raise TypeError("code must be of type bytes or bytearray")
        # All Gearman magic codes begin with a NUL byte ('\0REQ'/'\0RES').
        if code[0:1] != b'\x00':
            raise InvalidDataError("First byte of packet must be 0")
        self.code = code
        self.ptype = ptype
        if not isinstance(data, bytes) and not isinstance(data, bytearray):
            raise TypeError("data must be of type bytes or bytearray")
        self.data = data
        self.connection = connection

    def __repr__(self):
        ptype = constants.types.get(self.ptype, 'UNKNOWN')
        try:
            extra = self._formatExtraData()
        except Exception:
            # Formatting is best-effort; never let repr raise.
            extra = ''
        return '<gear.Packet 0x%x type: %s%s>' % (id(self), ptype, extra)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ leaves Packet
        # unhashable on Python 3; confirm packets are never used as
        # dict/set keys.
        if not isinstance(other, Packet):
            return False
        if (self.code == other.code and
            self.ptype == other.ptype and
            self.data == other.data):
            return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def _formatExtraData(self):
        # Best-effort human-readable summary of the interesting
        # arguments for __repr__, keyed by packet type.
        if self.ptype in [constants.JOB_CREATED,
                          constants.JOB_ASSIGN,
                          constants.GET_STATUS,
                          constants.STATUS_RES,
                          constants.WORK_STATUS,
                          constants.WORK_COMPLETE,
                          constants.WORK_FAIL,
                          constants.WORK_EXCEPTION,
                          constants.WORK_DATA,
                          constants.WORK_WARNING]:
            return ' handle: %s' % self.getArgument(0)

        if self.ptype == constants.JOB_ASSIGN_UNIQ:
            return (' handle: %s function: %s unique: %s' %
                    (self.getArgument(0),
                     self.getArgument(1),
                     self.getArgument(2)))

        if self.ptype in [constants.SUBMIT_JOB,
                          constants.SUBMIT_JOB_BG,
                          constants.SUBMIT_JOB_HIGH,
                          constants.SUBMIT_JOB_HIGH_BG,
                          constants.SUBMIT_JOB_LOW,
                          constants.SUBMIT_JOB_LOW_BG,
                          constants.SUBMIT_JOB_SCHED,
                          constants.SUBMIT_JOB_EPOCH]:
            return ' function: %s unique: %s' % (self.getArgument(0),
                                                 self.getArgument(1))

        if self.ptype in [constants.CAN_DO,
                          constants.CANT_DO,
                          constants.CAN_DO_TIMEOUT]:
            return ' function: %s' % (self.getArgument(0),)

        if self.ptype == constants.SET_CLIENT_ID:
            return ' id: %s' % (self.getArgument(0),)

        if self.ptype in [constants.OPTION_REQ,
                          constants.OPTION_RES]:
            return ' option: %s' % (self.getArgument(0),)

        if self.ptype == constants.ERROR:
            return ' code: %s message: %s' % (self.getArgument(0),
                                              self.getArgument(1))
        return ''

    def toBinary(self):
        """Return a Gearman wire protocol binary representation of the packet.

        :returns: The packet in binary form.
        :rtype: bytes
        """
        # 12-byte header (magic, type, length) followed by the raw data.
        b = struct.pack('!4sii', self.code, self.ptype, len(self.data))
        b = bytearray(b)
        b += self.data
        return b

    def getArgument(self, index, last=False):
        """Get the nth argument from the packet data.

        :arg int index: The argument index to look up.
        :arg bool last: Whether this is the last argument (and thus
            nulls should be ignored)
        :returns: The argument value.
        :rtype: bytes
        """
        # Arguments are NUL-separated on the wire.
        parts = self.data.split(b'\x00')
        if not last:
            return parts[index]
        # The final argument may itself contain NUL bytes; re-join the
        # remainder so it is returned intact.
        return b'\x00'.join(parts[index:])

    def getJob(self):
        """Get the :py:class:`Job` associated with the job handle in
        this packet.

        :returns: The :py:class:`Job` for this packet.
        :rtype: Job
        :raises UnknownJobError: If the job is not known.
        """
        handle = self.getArgument(0)
        job = self.connection.related_jobs.get(handle)
        if not job:
            raise UnknownJobError()
        return job
class BaseClientServer(object):
def __init__(self, client_id=None):
if client_id:
self.client_id = convert_to_bytes(client_id)
self.log = logging.getLogger("gear.BaseClientServer.%s" %
(self.client_id,))
else:
self.client_id = None
self.log = logging.getLogger("gear.BaseClientServer")
self.running = True
self.active_connections = []
self.inactive_connections = []
self.connection_index = -1
# A lock and notification mechanism to handle not having any
# current connections
self.connections_condition = threading.Condition()
# A pipe to wake up the poll loop in case it needs to restart
self.wake_read, self.wake_write = os.pipe()
self.poll_thread = threading.Thread(name="Gearman client poll",
target=self._doPollLoop)
self.poll_thread.daemon = True
self.poll_thread.start()
self.connect_thread = threading.Thread(name="Gearman client connect",
target=self._doConnectLoop)
self.connect_thread.daemon = True
self.connect_thread.start()
def _doConnectLoop(self):
# Outer run method of the reconnection thread
while self.running:
self.connections_condition.acquire()
while self.running and not self.inactive_connections:
self.log.debug("Waiting for change in available servers "
"to reconnect")
self.connections_condition.wait()
self.connections_condition.release()
self.log.debug("Checking if servers need to be reconnected")
try:
if self.running and not self._connectLoop():
# Nothing happened
time.sleep(2)
except Exception:
self.log.exception("Exception in connect loop:")
def _connectLoop(self):
# Inner method of the reconnection loop, triggered by
# a connection change
success = False
for conn in self.inactive_connections[:]:
self.log.debug("Trying to reconnect %s" % conn)
try:
conn.reconnect()
except ConnectionError:
self.log.debug("Unable to connect to %s" % conn)
continue
except Exception:
self.log.exception("Exception while connecting to %s" % conn)
continue
try:
self._onConnect(conn)
except Exception:
self.log.exception("Exception while performing on-connect "
"tasks for %s" % conn)
continue
self.connections_condition.acquire()
self.inactive_connections.remove(conn)
self.active_connections.append(conn)
self.connections_condition.notifyAll()
os.write(self.wake_write, b'1\n')
self.connections_condition.release()
try:
self._onActiveConnection(conn)
except Exception:
self.log.exception("Exception while performing active conn "
"tasks for %s" % conn)
success = True
return success
def _onConnect(self, conn):
# Called immediately after a successful (re-)connection
pass
def _onActiveConnection(self, conn):
# Called immediately after a connection is activated
pass
def _lostConnection(self, conn):
# Called as soon as a connection is detected as faulty. Remove
# it and return ASAP and let the connection thread deal with it.
self.log.debug("Marking %s as disconnected" % conn)
self.connections_condition.acquire()
try:
# NOTE(notmorgan): In the loop below it is possible to change the
# jobs list on the connection. In python 3 .values() is an iter not
# a static list, meaning that a change will break the for loop
# as the object being iterated on will have changed in size.
jobs = list(conn.related_jobs.values())
if conn in self.active_connections:
self.active_connections.remove(conn)
if conn not in self.inactive_connections:
self.inactive_connections.append(conn)
finally:
self.connections_condition.notifyAll()
self.connections_condition.release()
for job in jobs:
self.handleDisconnect(job)
def _doPollLoop(self):
# Outer run method of poll thread.
while self.running:
self.connections_condition.acquire()
while self.running and not self.active_connections:
self.log.debug("Waiting for change in available connections "
"to poll")
self.connections_condition.wait()
self.connections_condition.release()
try:
self._pollLoop()
except socket.error as e:
if e.errno == errno.ECONNRESET:
self.log.debug("Connection reset by peer")
# This will get logged later at info level as
# "Marking ... as disconnected"
except Exception:
self.log.exception("Exception in poll loop:")
def _pollLoop(self):
# Inner method of poll loop
self.log.debug("Preparing to poll")
poll = select.poll()
bitmask = (select.POLLIN | select.POLLERR |
select.POLLHUP | select.POLLNVAL)
# Reverse mapping of fd -> connection
conn_dict = {}
for conn in self.active_connections:
poll.register(conn.conn.fileno(), bitmask)
conn_dict[conn.conn.fileno()] = conn
# Register the wake pipe so that we can break if we need to
# reconfigure connections
poll.register(self.wake_read, bitmask)
while self.running:
self.log.debug("Polling %s connections" %
len(self.active_connections))
ret = poll.poll()
for fd, event in ret:
if fd == self.wake_read:
self.log.debug("Woken by pipe")
while True:
if os.read(self.wake_read, 1) == b'\n':
break
return
conn = conn_dict[fd]
if event & select.POLLIN:
# Process all packets that may have been read in this
# round of recv's by readPacket.
while True:
self.log.debug("Processing input on %s" % conn)
p = conn.readPacket()
if p:
if isinstance(p, Packet):
self.handlePacket(p)
else:
self.handleAdminRequest(p)
else:
self.log.debug("Received no data on %s" % conn)
self._lostConnection(conn)
return
if not conn.hasPendingData():
break
else:
self.log.debug("Received error event on %s" % conn)
self._lostConnection(conn)
return
def handlePacket(self, packet):
"""Handle a received packet.
This method is called whenever a packet is received from any
connection. It normally calls the handle method appropriate
for the specific packet.
:arg Packet packet: The :py:class:`Packet` that was received.
"""
self.log.info("Received packet from %s: %s" % (packet.connection,
packet))
start = time.time()
if packet.ptype == constants.JOB_CREATED:
self.handleJobCreated(packet)
elif packet.ptype == constants.WORK_COMPLETE:
self.handleWorkComplete(packet)
elif packet.ptype == constants.WORK_FAIL:
self.handleWorkFail(packet)
elif packet.ptype == constants.WORK_EXCEPTION:
self.handleWorkException(packet)
elif packet.ptype == constants.WORK_DATA:
self.handleWorkData(packet)
elif packet.ptype == constants.WORK_WARNING:
self.handleWorkWarning(packet)
elif packet.ptype == constants.WORK_STATUS:
self.handleWorkStatus(packet)
elif packet.ptype == constants.STATUS_RES:
self.handleStatusRes(packet)
elif packet.ptype == constants.GET_STATUS:
self.handleGetStatus(packet)
elif packet.ptype == constants.JOB_ASSIGN_UNIQ:
self.handleJobAssignUnique(packet)
elif packet.ptype == constants.JOB_ASSIGN:
self.handleJobAssign(packet)
elif packet.ptype == constants.NO_JOB:
self.handleNoJob(packet)
elif packet.ptype == constants.NOOP:
self.handleNoop(packet)
elif packet.ptype == constants.SUBMIT_JOB:
self.handleSubmitJob(packet)
elif packet.ptype == constants.SUBMIT_JOB_BG:
self.handleSubmitJobBg(packet)
elif packet.ptype == constants.SUBMIT_JOB_HIGH:
self.handleSubmitJobHigh(packet)
elif packet.ptype == constants.SUBMIT_JOB_HIGH_BG:
self.handleSubmitJobHighBg(packet)
elif packet.ptype == constants.SUBMIT_JOB_LOW:
self.handleSubmitJobLow(packet)
elif packet.ptype == constants.SUBMIT_JOB_LOW_BG:
self.handleSubmitJobLowBg(packet)
elif packet.ptype == constants.SUBMIT_JOB_SCHED:
self.handleSubmitJobSched(packet)
elif packet.ptype == constants.SUBMIT_JOB_EPOCH:
self.handleSubmitJobEpoch(packet)
elif packet.ptype == constants.GRAB_JOB_UNIQ:
self.handleGrabJobUniq(packet)
elif packet.ptype == constants.GRAB_JOB:
self.handleGrabJob(packet)
elif packet.ptype == constants.PRE_SLEEP:
self.handlePreSleep(packet)
elif packet.ptype == constants.SET_CLIENT_ID:
self.handleSetClientID(packet)
elif packet.ptype == constants.CAN_DO:
self.handleCanDo(packet)
elif packet.ptype == constants.CAN_DO_TIMEOUT:
self.handleCanDoTimeout(packet)
elif packet.ptype == constants.CANT_DO:
self.handleCantDo(packet)
elif packet.ptype == constants.RESET_ABILITIES:
self.handleResetAbilities(packet)
elif packet.ptype == constants.ECHO_REQ:
self.handleEchoReq(packet)
elif packet.ptype == constants.ECHO_RES:
self.handleEchoRes(packet)
elif packet.ptype == constants.ERROR:
self.handleError(packet)
elif packet.ptype == constants.ALL_YOURS:
self.handleAllYours(packet)
elif packet.ptype == constants.OPTION_REQ:
self.handleOptionReq(packet)
elif packet.ptype == constants.OPTION_RES:
self.handleOptionRes(packet)
else:
self.log.error("Received unknown packet: %s" % packet)
end = time.time()
self.reportTimingStats(packet.ptype, end - start)
def reportTimingStats(self, ptype, duration):
"""Report processing times by packet type
This method is called by handlePacket to report how long
processing took for each packet. The default implementation
does nothing.
:arg bytes ptype: The packet type (one of the packet types in
constants).
:arg float duration: The time (in seconds) it took to process
the packet.
"""
pass
def _defaultPacketHandler(self, packet):
self.log.error("Received unhandled packet: %s" % packet)
# Default per-packet-type dispatch targets.  The packet dispatcher looks
# up a handle<PacketType> method for each packet received; every handler
# in this group simply delegates to _defaultPacketHandler.  Subclasses
# (Client, Worker) override only the handlers they care about.
def handleJobCreated(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkComplete(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkFail(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkException(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkData(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkWarning(self, packet):
    return self._defaultPacketHandler(packet)
def handleWorkStatus(self, packet):
    return self._defaultPacketHandler(packet)
def handleStatusRes(self, packet):
    return self._defaultPacketHandler(packet)
def handleGetStatus(self, packet):
    return self._defaultPacketHandler(packet)
def handleJobAssignUnique(self, packet):
    return self._defaultPacketHandler(packet)
def handleJobAssign(self, packet):
    return self._defaultPacketHandler(packet)
def handleNoJob(self, packet):
    return self._defaultPacketHandler(packet)
def handleNoop(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJob(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobBg(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobHigh(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobHighBg(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobLow(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobLowBg(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobSched(self, packet):
    return self._defaultPacketHandler(packet)
def handleSubmitJobEpoch(self, packet):
    return self._defaultPacketHandler(packet)
def handleGrabJobUniq(self, packet):
    return self._defaultPacketHandler(packet)
def handleGrabJob(self, packet):
    return self._defaultPacketHandler(packet)
def handlePreSleep(self, packet):
    return self._defaultPacketHandler(packet)
def handleSetClientID(self, packet):
    return self._defaultPacketHandler(packet)
def handleCanDo(self, packet):
    return self._defaultPacketHandler(packet)
def handleCanDoTimeout(self, packet):
    return self._defaultPacketHandler(packet)
def handleCantDo(self, packet):
    return self._defaultPacketHandler(packet)
def handleResetAbilities(self, packet):
    return self._defaultPacketHandler(packet)
def handleEchoReq(self, packet):
    return self._defaultPacketHandler(packet)
def handleEchoRes(self, packet):
    return self._defaultPacketHandler(packet)
def handleError(self, packet):
    return self._defaultPacketHandler(packet)
def handleAllYours(self, packet):
    return self._defaultPacketHandler(packet)
def handleOptionReq(self, packet):
    return self._defaultPacketHandler(packet)
def handleOptionRes(self, packet):
    return self._defaultPacketHandler(packet)
def handleAdminRequest(self, request):
    """Handle an administrative command response from Gearman.

    Called whenever a response to a previously issued administrative
    command arrives on one of this client's connections; it normally
    releases the wait lock on the initiating AdminRequest object.

    :arg AdminRequest request: The :py:class:`AdminRequest` that
        initiated the received response.
    """
    message = "Received admin data %s" % request
    self.log.info(message)
    request.setComplete()
def shutdown(self):
    """Close all connections and stop all running threads.

    The object may no longer be used after shutdown is called.
    """
    # Guard clause: a second shutdown (or one before start) is a no-op.
    if not self.running:
        self.log.warning("Shutdown called when not currently running. "
                         "Ignoring.")
        return
    self.log.debug("Beginning shutdown")
    self._shutdown()
    self.log.debug("Beginning cleanup")
    self._cleanup()
    self.log.debug("Finished shutdown")
def _shutdown(self):
    # The first part of the shutdown process where all threads
    # are told to exit.
    self.running = False
    self.connections_condition.acquire()
    try:
        # Wake anyone blocked waiting on the condition (e.g.
        # waitForServer / the connect thread) ...
        self.connections_condition.notifyAll()
        # ... and poke the poll loop awake via the wakeup pipe so it
        # notices that self.running is now False.
        os.write(self.wake_write, b'1\n')
    finally:
        self.connections_condition.release()
def _cleanup(self):
    # The second part of the shutdown process where we wait for all
    # threads to exit and then clean up.
    self.poll_thread.join()
    self.connect_thread.join()
    # Both threads are gone; it is now safe to tear down the sockets
    # and the wakeup pipe without racing the poll loop.
    for connection in self.active_connections:
        connection.disconnect()
    self.active_connections = []
    self.inactive_connections = []
    os.close(self.wake_read)
    os.close(self.wake_write)
class BaseClient(BaseClientServer):
    """Shared functionality for Gearman clients and workers.

    Extends BaseClientServer with a round-robin connection pool and
    the packet-sending/broadcast primitives used by both
    :py:class:`Client` and :py:class:`Worker`.
    """

    def __init__(self, client_id='unknown'):
        super(BaseClient, self).__init__(client_id)
        self.log = logging.getLogger("gear.BaseClient.%s" % (self.client_id,))
        # A lock to use when sending packets that set the state across
        # all known connections. Note that it doesn't necessarily need
        # to be used for all broadcasts, only those that affect multi-
        # connection state, such as setting options or functions.
        self.broadcast_lock = threading.RLock()

    def addServer(self, host, port=4730,
                  ssl_key=None, ssl_cert=None, ssl_ca=None,
                  keepalive=False, tcp_keepidle=7200, tcp_keepintvl=75,
                  tcp_keepcnt=9):
        """Add a server to the client's connection pool.

        Any number of Gearman servers may be added to a client.  The
        client will connect to all of them and send jobs to them in a
        round-robin fashion.  When servers are disconnected, the
        client will automatically remove them from the pool,
        continuously try to reconnect to them, and return them to the
        pool when reconnected.  New servers may be added at any time.

        This is a non-blocking call that will return regardless of
        whether the initial connection succeeded.  If you need to
        ensure that a connection is ready before proceeding, see
        :py:meth:`waitForServer`.

        When using SSL connections, all SSL files must be specified.

        :arg str host: The hostname or IP address of the server.
        :arg int port: The port on which the gearman server is listening.
        :arg str ssl_key: Path to the SSL private key.
        :arg str ssl_cert: Path to the SSL certificate.
        :arg str ssl_ca: Path to the CA certificate.
        :arg bool keepalive: Whether to use TCP keepalives
        :arg int tcp_keepidle: Idle time after which to start keepalives
            sending
        :arg int tcp_keepintvl: Interval in seconds between TCP keepalives
        :arg int tcp_keepcnt: Count of TCP keepalives to send before
            disconnect
        :raises ConfigurationError: If the host/port combination has
            already been added to the client.
        """
        self.log.debug("Adding server %s port %s" % (host, port))
        self.connections_condition.acquire()
        try:
            # Reject duplicates across both active and inactive pools.
            for conn in self.active_connections + self.inactive_connections:
                if conn.host == host and conn.port == port:
                    raise ConfigurationError("Host/port already specified")
            conn = Connection(host, port, ssl_key, ssl_cert, ssl_ca,
                              self.client_id, keepalive, tcp_keepidle,
                              tcp_keepintvl, tcp_keepcnt)
            self.inactive_connections.append(conn)
            # Wake the connect thread so it picks up the new server.
            self.connections_condition.notifyAll()
        finally:
            self.connections_condition.release()

    def _checkTimeout(self, start_time, timeout):
        # Raise TimeoutError once more than ``timeout`` seconds have
        # elapsed since ``start_time``.
        if time.time() - start_time > timeout:
            raise TimeoutError()

    def waitForServer(self, timeout=None):
        """Wait for at least one server to be connected.

        Block until at least one gearman server is connected.

        :arg numeric timeout: Number of seconds to wait for a connection.
            If None, wait forever (default: no timeout).
        :raises TimeoutError: If the timeout is reached before any server
            connects.
        """
        connected = False
        start_time = time.time()
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.active_connections:
                if timeout is not None:
                    self._checkTimeout(start_time, timeout)
                self.log.debug("Waiting for at least one active connection")
                # Re-check periodically so a shutdown (or timeout) is
                # noticed even without a notify.
                self.connections_condition.wait(timeout=1)
            if self.active_connections:
                self.log.debug("Active connection found")
                connected = True
            self.connections_condition.release()
            if connected:
                return

    def getConnection(self):
        """Return a connected server.

        Finds the next scheduled connected server in the round-robin
        rotation and returns it.  It is not usually necessary to use
        this method external to the library, as more consumer-oriented
        methods such as submitJob already use it internally, but is
        available nonetheless if necessary.

        :returns: The next scheduled :py:class:`Connection` object.
        :rtype: :py:class:`Connection`
        :raises NoConnectedServersError: If there are not currently
            connected servers.
        """
        conn = None
        try:
            self.connections_condition.acquire()
            if not self.active_connections:
                raise NoConnectedServersError("No connected Gearman servers")
            # Round-robin: advance the index, wrapping at the end.
            self.connection_index += 1
            if self.connection_index >= len(self.active_connections):
                self.connection_index = 0
            conn = self.active_connections[self.connection_index]
        finally:
            self.connections_condition.release()
        return conn

    def broadcast(self, packet):
        """Send a packet to all currently connected servers.

        :arg Packet packet: The :py:class:`Packet` to send.
        """
        # Snapshot the list; sendPacket may mutate active_connections
        # when a send fails.
        connections = self.active_connections[:]
        for connection in connections:
            try:
                self.sendPacket(packet, connection)
            except Exception:
                # Error handling is all done by sendPacket
                pass

    def sendPacket(self, packet, connection):
        """Send a packet to a single connection, removing it from the
        list of active connections if that fails.

        :arg Packet packet: The :py:class:`Packet` to send.
        :arg Connection connection: The :py:class:`Connection` on
            which to send the packet.
        """
        try:
            connection.sendPacket(packet)
            return
        except Exception:
            self.log.exception("Exception while sending packet %s to %s" %
                               (packet, connection))
            # If we can't send the packet, discard the connection
            self._lostConnection(connection)
            raise

    def handleEchoRes(self, packet):
        """Handle an ECHO_RES packet.

        Causes the blocking :py:meth:`Connection.echo` invocation to
        return.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: None
        """
        packet.connection.handleEchoRes(packet.getArgument(0, True))

    def handleError(self, packet):
        """Handle an ERROR packet.

        Logs the error.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: None
        """
        self.log.error("Received ERROR packet: %s: %s" %
                       (packet.getArgument(0),
                        packet.getArgument(1)))
        try:
            # An ERROR aborts whatever request is at the head of this
            # connection's pending queue; release its waiter.
            task = packet.connection.pending_tasks.pop(0)
            task.setComplete()
        except Exception:
            self.log.exception("Exception while handling error packet:")
            self._lostConnection(packet.connection)
class Client(BaseClient):
    """A Gearman client.

    You may wish to subclass this class in order to override the
    default event handlers to react to Gearman events.  Be sure to
    call the superclass event handlers so that they may perform
    job-related housekeeping.

    :arg str client_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Client.client_id).  Defaults to
        'unknown'.
    """

    def __init__(self, client_id='unknown'):
        super(Client, self).__init__(client_id)
        self.log = logging.getLogger("gear.Client.%s" % (self.client_id,))
        # Option names (bytes) that should be (re-)applied to every
        # connection, including ones made after setOption() was called.
        self.options = set()

    def __repr__(self):
        return '<gear.Client 0x%x>' % id(self)

    def _onConnect(self, conn):
        # Called immediately after a successful (re-)connection
        self.broadcast_lock.acquire()
        try:
            super(Client, self)._onConnect(conn)
            # Replay previously requested options onto the new
            # connection so its state matches the rest of the pool.
            for name in self.options:
                self._setOptionConnection(name, conn)
        finally:
            self.broadcast_lock.release()

    def _setOptionConnection(self, name, conn):
        # Set an option on a connection
        packet = Packet(constants.REQ, constants.OPTION_REQ, name)
        task = OptionReqTask()
        try:
            conn.pending_tasks.append(task)
            self.sendPacket(packet, conn)
        except Exception:
            # Error handling is all done by sendPacket
            task = None
        return task

    def setOption(self, name, timeout=30):
        """Set an option for all connections.

        :arg str name: The option name to set.
        :arg int timeout: How long to wait (in seconds) for a response
            from the server before giving up (default: 30 seconds).
        :returns: True if the option was set on all connections,
            otherwise False
        :rtype: bool
        """
        tasks = {}
        name = convert_to_bytes(name)
        self.broadcast_lock.acquire()
        try:
            # Remember the option so _onConnect applies it to future
            # connections as well.
            self.options.add(name)
            connections = self.active_connections[:]
            for connection in connections:
                task = self._setOptionConnection(name, connection)
                if task:
                    tasks[task] = connection
        finally:
            self.broadcast_lock.release()
        success = True
        for task in tasks.keys():
            complete = task.wait(timeout)
            conn = tasks[task]
            if not complete:
                self.log.error("Connection %s timed out waiting for a "
                               "response to an option request: %s" %
                               (conn, name))
                # NOTE(review): a timed-out connection is dropped but does
                # not set success=False -- confirm this is intended.
                self._lostConnection(conn)
                continue
            if name not in conn.options:
                success = False
        return success

    def submitJob(self, job, background=False, precedence=PRECEDENCE_NORMAL,
                  timeout=30):
        """Submit a job to a Gearman server.

        Submits the provided job to the next server in this client's
        round-robin connection pool.

        If the job is a foreground job, updates will be made to the
        supplied :py:class:`Job` object as they are received.

        :arg Job job: The :py:class:`Job` to submit.
        :arg bool background: Whether the job should be backgrounded.
        :arg int precedence: Whether the job should have normal, low, or
            high precedence.  One of :py:data:`PRECEDENCE_NORMAL`,
            :py:data:`PRECEDENCE_LOW`, or :py:data:`PRECEDENCE_HIGH`
        :arg int timeout: How long to wait (in seconds) for a response
            from the server before giving up (default: 30 seconds).
        :raises ConfigurationError: If an invalid precendence value
            is supplied.
        """
        if job.unique is None:
            unique = b''
        else:
            unique = job.binary_unique
        # Wire format: name NUL unique NUL arguments.
        data = b'\x00'.join((job.binary_name, unique, job.binary_arguments))
        if background:
            if precedence == PRECEDENCE_NORMAL:
                cmd = constants.SUBMIT_JOB_BG
            elif precedence == PRECEDENCE_LOW:
                cmd = constants.SUBMIT_JOB_LOW_BG
            elif precedence == PRECEDENCE_HIGH:
                cmd = constants.SUBMIT_JOB_HIGH_BG
            else:
                raise ConfigurationError("Invalid precedence value")
        else:
            if precedence == PRECEDENCE_NORMAL:
                cmd = constants.SUBMIT_JOB
            elif precedence == PRECEDENCE_LOW:
                cmd = constants.SUBMIT_JOB_LOW
            elif precedence == PRECEDENCE_HIGH:
                cmd = constants.SUBMIT_JOB_HIGH
            else:
                raise ConfigurationError("Invalid precedence value")
        packet = Packet(constants.REQ, cmd, data)
        attempted_connections = set()
        while True:
            # Stop once every currently active connection has been tried.
            if attempted_connections == set(self.active_connections):
                break
            conn = self.getConnection()
            task = SubmitJobTask(job)
            conn.pending_tasks.append(task)
            attempted_connections.add(conn)
            try:
                self.sendPacket(packet, conn)
            except Exception:
                # Error handling is all done by sendPacket
                continue
            complete = task.wait(timeout)
            if not complete:
                self.log.error("Connection %s timed out waiting for a "
                               "response to a submit job request: %s" %
                               (conn, job))
                self._lostConnection(conn)
                continue
            if not job.handle:
                self.log.error("Connection %s sent an error in "
                               "response to a submit job request: %s" %
                               (conn, job))
                continue
            job.connection = conn
            return
        raise GearmanError("Unable to submit job to any connected servers")

    def handleJobCreated(self, packet):
        """Handle a JOB_CREATED packet.

        Updates the appropriate :py:class:`Job` with the newly
        returned job handle.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        task = packet.connection.pending_tasks.pop(0)
        if not isinstance(task, SubmitJobTask):
            msg = ("Unexpected response received to submit job "
                   "request: %s" % packet)
            self.log.error(msg)
            self._lostConnection(packet.connection)
            raise GearmanError(msg)
        job = task.job
        job.handle = packet.data
        packet.connection.related_jobs[job.handle] = job
        task.setComplete()
        self.log.debug("Job created; %s" % job)
        return job

    def handleWorkComplete(self, packet):
        """Handle a WORK_COMPLETE packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        job.complete = True
        job.failure = False
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job complete; %s data: %s" %
                       (job, job.data))
        return job

    def handleWorkFail(self, packet):
        """Handle a WORK_FAIL packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.complete = True
        job.failure = True
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job failed; %s" % job)
        return job

    def handleWorkException(self, packet):
        """Handle a WORK_EXCEPTION packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.exception = packet.getArgument(1, True)
        job.complete = True
        job.failure = True
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job exception; %s exception: %s" %
                       (job, job.exception))
        return job

    def handleWorkData(self, packet):
        """Handle a WORK_DATA packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        self.log.debug("Job data; job: %s data: %s" %
                       (job, job.data))
        return job

    def handleWorkWarning(self, packet):
        """Handle a WORK_WARNING packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        job.warning = True
        self.log.debug("Job warning; %s data: %s" %
                       (job, job.data))
        return job

    def handleWorkStatus(self, packet):
        """Handle a WORK_STATUS packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.numerator = packet.getArgument(1)
        job.denominator = packet.getArgument(2)
        try:
            # NOTE(review): arguments appear to be bytes here; float(bytes)
            # raises on py3, falling into the except -- confirm upstream.
            job.fraction_complete = (float(job.numerator) /
                                     float(job.denominator))
        except Exception:
            job.fraction_complete = None
        self.log.debug("Job status; %s complete: %s/%s" %
                       (job, job.numerator, job.denominator))
        return job

    def handleStatusRes(self, packet):
        """Handle a STATUS_RES packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.known = (packet.getArgument(1) == b'1')
        job.running = (packet.getArgument(2) == b'1')
        job.numerator = packet.getArgument(3)
        job.denominator = packet.getArgument(4)
        try:
            job.fraction_complete = (float(job.numerator) /
                                     float(job.denominator))
        except Exception:
            job.fraction_complete = None
        return job

    def handleOptionRes(self, packet):
        """Handle an OPTION_RES packet.

        Updates the set of options for the connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: None.
        """
        task = packet.connection.pending_tasks.pop(0)
        if not isinstance(task, OptionReqTask):
            msg = ("Unexpected response received to option "
                   "request: %s" % packet)
            self.log.error(msg)
            self._lostConnection(packet.connection)
            raise GearmanError(msg)
        packet.connection.handleOptionRes(packet.getArgument(0))
        task.setComplete()

    def handleDisconnect(self, job):
        """Handle a Gearman server disconnection.

        If the Gearman server is disconnected, this will be called for any
        jobs currently associated with the server.

        :arg Job job: The :py:class:`Job` that was running when the server
            disconnected.
        """
        return job
class FunctionRecord(object):
    """Represents a function that should be registered with Gearman.

    This class only directly needs to be instantiated for use with
    :py:meth:`Worker.setFunctions`.  If a timeout value is supplied,
    the function will be registered with CAN_DO_TIMEOUT.

    :arg str name: The name of the function to register.
    :arg numeric timeout: The timeout value (optional).
    """

    def __init__(self, name, timeout=None):
        self.name = name
        self.timeout = timeout

    def __repr__(self):
        details = (id(self), self.name, self.timeout)
        return '<gear.FunctionRecord 0x%x name: %s timeout: %s>' % details
class BaseJob(object):
    """Common state shared by client-side and worker-side jobs.

    Holds the job name, opaque argument blob, optional unique ID and
    Gearman handle.  Name/arguments/unique are stored internally as
    bytes (via convert_to_bytes); the ``name`` property presents a
    decoded str while the ``binary_*`` properties expose the raw wire
    form used when building packets.
    """

    def __init__(self, name, arguments, unique=None, handle=None):
        self._name = convert_to_bytes(name)
        self._validate_arguments(arguments)
        self._arguments = convert_to_bytes(arguments)
        self._unique = convert_to_bytes(unique)
        # Gearman job handle; None until assigned by the server.
        self.handle = handle
        # Connection this job is associated with; set on submit/assign.
        self.connection = None

    def _validate_arguments(self, arguments):
        # Arguments go on the wire verbatim, so they must already be a
        # binary type -- no implicit encoding is performed.
        if (not isinstance(arguments, bytes) and
                not isinstance(arguments, bytearray)):
            raise TypeError("arguments must be of type bytes or bytearray")

    @property
    def arguments(self):
        return self._arguments

    @arguments.setter
    def arguments(self, value):
        self._arguments = value

    @property
    def unique(self):
        return self._unique

    @unique.setter
    def unique(self, value):
        self._unique = value

    @property
    def name(self):
        # Decode for presentation; the stored value stays as bytes.
        if isinstance(self._name, six.binary_type):
            return self._name.decode('utf-8')
        return self._name

    @name.setter
    def name(self, value):
        if isinstance(value, six.text_type):
            value = value.encode('utf-8')
        self._name = value

    @property
    def binary_name(self):
        # Raw bytes name as used in protocol packets.
        return self._name

    @property
    def binary_arguments(self):
        return self._arguments

    @property
    def binary_unique(self):
        return self._unique

    def __repr__(self):
        return '<gear.Job 0x%x handle: %s name: %s unique: %s>' % (
            id(self), self.handle, self.name, self.unique)
class WorkerJob(BaseJob):
    """A job that Gearman has assigned to a Worker.  Not intended to
    be instantiated directly, but rather returned by
    :py:meth:`Worker.getJob`.

    :arg str handle: The job handle assigned by gearman.
    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job. Assumed to be utf-8.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes)
        The Gearman job handle.
    **connection** (:py:class:`Connection` or None)
        The connection associated with the job.  Only set after the job
        has been submitted to a Gearman server.
    """

    def __init__(self, handle, name, arguments, unique=None):
        super(WorkerJob, self).__init__(name, arguments, unique, handle)

    def sendWorkData(self, data=b''):
        """Send a WORK_DATA packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        # Wire format for all WORK_* payloads below: handle NUL payload.
        data = self.handle + b'\x00' + data
        p = Packet(constants.REQ, constants.WORK_DATA, data)
        self.connection.sendPacket(p)

    def sendWorkWarning(self, data=b''):
        """Send a WORK_WARNING packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        data = self.handle + b'\x00' + data
        p = Packet(constants.REQ, constants.WORK_WARNING, data)
        self.connection.sendPacket(p)

    def sendWorkStatus(self, numerator, denominator):
        """Send a WORK_STATUS packet to the client.

        Sends a numerator and denominator that together represent the
        fraction complete of the job.

        :arg numeric numerator: The numerator of the fraction complete.
        :arg numeric denominator: The denominator of the fraction complete.
        """
        data = (self.handle + b'\x00' +
                str(numerator).encode('utf8') + b'\x00' +
                str(denominator).encode('utf8'))
        p = Packet(constants.REQ, constants.WORK_STATUS, data)
        self.connection.sendPacket(p)

    def sendWorkComplete(self, data=b''):
        """Send a WORK_COMPLETE packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        data = self.handle + b'\x00' + data
        p = Packet(constants.REQ, constants.WORK_COMPLETE, data)
        self.connection.sendPacket(p)

    def sendWorkFail(self):
        "Send a WORK_FAIL packet to the client."
        # WORK_FAIL carries only the handle, no payload.
        p = Packet(constants.REQ, constants.WORK_FAIL, self.handle)
        self.connection.sendPacket(p)

    def sendWorkException(self, data=b''):
        """Send a WORK_EXCEPTION packet to the client.

        :arg bytes data: The exception data to be sent to the client
            (optional).
        """
        data = self.handle + b'\x00' + data
        p = Packet(constants.REQ, constants.WORK_EXCEPTION, data)
        self.connection.sendPacket(p)
class Worker(BaseClient):
    """A Gearman worker.

    :arg str client_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Worker.client_id).
    :arg str worker_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Worker.client_id).  This parameter name
        is deprecated, use client_id instead.
    """

    # Class used for jobs handed out by getJob(); subclasses may
    # override to return a richer job type.
    job_class = WorkerJob

    def __init__(self, client_id=None, worker_id=None):
        # BUG FIX: the original check was ``if not client_id or worker_id:``
        # which, due to operator precedence, parses as
        # ``(not client_id) or worker_id`` and therefore raised whenever
        # the (deprecated but documented) worker_id parameter was
        # supplied, making the fallback below unreachable.  Require at
        # least one of the two instead.
        if not client_id and not worker_id:
            raise Exception("A client_id must be provided")
        if worker_id:
            client_id = worker_id
        super(Worker, self).__init__(client_id)
        self.log = logging.getLogger("gear.Worker.%s" % (self.client_id,))
        self.worker_id = client_id
        # Registered functions, keyed by bytes name -> FunctionRecord.
        self.functions = {}
        # Protects waiting_for_jobs and the per-connection state machine.
        self.job_lock = threading.Lock()
        # Number of threads currently blocked in getJob().
        self.waiting_for_jobs = 0
        self.job_queue = queue_mod.Queue()

    def __repr__(self):
        return '<gear.Worker 0x%x>' % id(self)

    def registerFunction(self, name, timeout=None):
        """Register a function with Gearman.

        If a timeout value is supplied, the function will be
        registered with CAN_DO_TIMEOUT.

        :arg str name: The name of the function to register.
        :arg numeric timeout: The timeout value (optional).
        """
        name = convert_to_bytes(name)
        self.functions[name] = FunctionRecord(name, timeout)
        if timeout:
            self._sendCanDoTimeout(name, timeout)
        else:
            self._sendCanDo(name)
        # Wake any sleeping connections so they can grab jobs for the
        # newly registered function.
        connections = self.active_connections[:]
        for connection in connections:
            if connection.state == "SLEEP":
                connection.changeState("IDLE")
        self._updateStateMachines()

    def unRegisterFunction(self, name):
        """Remove a function from Gearman's registry.

        :arg str name: The name of the function to remove.
        """
        name = convert_to_bytes(name)
        del self.functions[name]
        self._sendCantDo(name)

    def setFunctions(self, functions):
        """Replace the set of functions registered with Gearman.

        Accepts a list of :py:class:`FunctionRecord` objects which
        represents the complete set of functions that should be
        registered with Gearman.  Any existing functions will be
        unregistered and these registered in their place.  If the
        empty list is supplied, then the Gearman registered function
        set will be cleared.

        :arg list functions: A list of :py:class:`FunctionRecord` objects.
        """
        self._sendResetAbilities()
        self.functions = {}
        for f in functions:
            if not isinstance(f, FunctionRecord):
                raise InvalidDataError(
                    "An iterable of FunctionRecords is required.")
            self.functions[f.name] = f
        for f in self.functions.values():
            if f.timeout:
                self._sendCanDoTimeout(f.name, f.timeout)
            else:
                self._sendCanDo(f.name)

    def _sendCanDo(self, name):
        # Broadcast CAN_DO for the named function to all connections.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CAN_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCanDoTimeout(self, name, timeout):
        # Broadcast CAN_DO_TIMEOUT; payload is name NUL timeout.
        # NOTE(review): timeout is concatenated to bytes here, so callers
        # appear to need a bytes timeout value -- confirm expected type.
        self.broadcast_lock.acquire()
        try:
            data = name + b'\x00' + timeout
            p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCantDo(self, name):
        # Broadcast CANT_DO to unregister the named function.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CANT_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendResetAbilities(self):
        # Broadcast RESET_ABILITIES to clear all registered functions.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.RESET_ABILITIES, b'')
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendPreSleep(self, connection):
        # Tell one server we are going to sleep until a NOOP wakes us.
        p = Packet(constants.REQ, constants.PRE_SLEEP, b'')
        self.sendPacket(p, connection)

    def _sendGrabJobUniq(self, connection=None):
        # Request a job; if no connection is given, ask every server.
        p = Packet(constants.REQ, constants.GRAB_JOB_UNIQ, b'')
        if connection:
            self.sendPacket(p, connection)
        else:
            self.broadcast(p)

    def _onConnect(self, conn):
        self.broadcast_lock.acquire()
        try:
            # Called immediately after a successful (re-)connection
            p = Packet(constants.REQ, constants.SET_CLIENT_ID, self.client_id)
            conn.sendPacket(p)
            super(Worker, self)._onConnect(conn)
            # Re-register all known functions on the new connection.
            for f in self.functions.values():
                if f.timeout:
                    data = f.name + b'\x00' + f.timeout
                    p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
                else:
                    p = Packet(constants.REQ, constants.CAN_DO, f.name)
                conn.sendPacket(p)
            conn.changeState("IDLE")
        finally:
            self.broadcast_lock.release()
        # Any exceptions will be handled by the calling function, and the
        # connection will not be put into the pool.

    def _onActiveConnection(self, conn):
        # A connection just became active; if threads are waiting for
        # jobs, put it to work immediately.
        self.job_lock.acquire()
        try:
            if self.waiting_for_jobs > 0:
                self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _updateStateMachines(self):
        # Reconcile each connection's state with the number of threads
        # waiting for jobs.  Caller must hold job_lock.
        connections = self.active_connections[:]
        for connection in connections:
            if (connection.state == "IDLE" and self.waiting_for_jobs > 0):
                self._sendGrabJobUniq(connection)
                connection.changeState("GRAB_WAIT")
            if (connection.state != "IDLE" and self.waiting_for_jobs < 1):
                connection.changeState("IDLE")

    def getJob(self):
        """Get a job from Gearman.

        Blocks until a job is received.  This method is re-entrant, so
        it is safe to call this method on a single worker from
        multiple threads.  In that case, one of them at random will
        receive the job assignment.

        :returns: The :py:class:`WorkerJob` assigned.
        :rtype: :py:class:`WorkerJob`.
        :raises InterruptedError: If interrupted (by
            :py:meth:`stopWaitingForJobs`) before a job is received.
        """
        self.job_lock.acquire()
        try:
            # self.running gets cleared during _shutdown(), before the
            # stopWaitingForJobs() is called.  This check has to
            # happen with the job_lock held, otherwise there would be
            # a window for race conditions between manipulation of
            # "running" and "waiting_for_jobs".
            if not self.running:
                raise InterruptedError()
            self.waiting_for_jobs += 1
            self.log.debug("Get job; number of threads waiting for jobs: %s" %
                           self.waiting_for_jobs)
            # Fast path: a job may already be queued.
            try:
                job = self.job_queue.get(False)
            except queue_mod.Empty:
                job = None
            if not job:
                self._updateStateMachines()
        finally:
            self.job_lock.release()
        if not job:
            # Blocking get; a None sentinel means we were interrupted.
            job = self.job_queue.get()
        self.log.debug("Received job: %s" % job)
        if job is None:
            raise InterruptedError()
        return job

    def stopWaitingForJobs(self):
        """Interrupts all running :py:meth:`getJob` calls, which will raise
        an exception.
        """
        self.job_lock.acquire()
        try:
            # First wait (briefly) for any in-flight GRAB_JOB exchanges
            # to settle, dropping connections that take too long.
            while True:
                connections = self.active_connections[:]
                now = time.time()
                ok = True
                for connection in connections:
                    if connection.state == "GRAB_WAIT":
                        # Replies to GRAB_JOB should be fast, give up if
                        # we've been waiting for more than 5 seconds.
                        if now - connection.state_time > 5:
                            self._lostConnection(connection)
                        else:
                            ok = False
                if ok:
                    break
                else:
                    self.job_lock.release()
                    time.sleep(0.1)
                    self.job_lock.acquire()
            # Wake every waiting getJob() with a None sentinel.
            while self.waiting_for_jobs > 0:
                self.waiting_for_jobs -= 1
                self.job_queue.put(None)
            self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _shutdown(self):
        self.job_lock.acquire()
        try:
            # The upstream _shutdown() will clear the "running" bool.
            # Because that is a variable which is used for proper
            # synchronization of the exit within getJob() which might be
            # about to be called from a separate thread, it's important
            # to call it with a proper lock being held.
            super(Worker, self)._shutdown()
        finally:
            self.job_lock.release()
        self.stopWaitingForJobs()

    def handleNoop(self, packet):
        """Handle a NOOP packet.

        Sends a GRAB_JOB_UNIQ packet on the same connection.
        GRAB_JOB_UNIQ will return jobs regardless of whether they have
        been specified with a unique identifier when submitted.  If
        they were not, then :py:attr:`WorkerJob.unique` attribute
        will be None.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        self.job_lock.acquire()
        try:
            if packet.connection.state == "SLEEP":
                self.log.debug("Sending GRAB_JOB_UNIQ")
                self._sendGrabJobUniq(packet.connection)
                packet.connection.changeState("GRAB_WAIT")
            else:
                # (typo "unexpecetd" fixed in the log message)
                self.log.debug("Received unexpected NOOP packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleNoJob(self, packet):
        """Handle a NO_JOB packet.

        Sends a PRE_SLEEP packet on the same connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        self.job_lock.acquire()
        try:
            if packet.connection.state == "GRAB_WAIT":
                self.log.debug("Sending PRE_SLEEP")
                self._sendPreSleep(packet.connection)
                packet.connection.changeState("SLEEP")
            else:
                self.log.debug("Received unexpected NO_JOB packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleJobAssign(self, packet):
        """Handle a JOB_ASSIGN packet.

        Adds a WorkerJob to the internal queue to be picked up by any
        threads waiting in :py:meth:`getJob`.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        arguments = packet.getArgument(2, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, None)

    def handleJobAssignUnique(self, packet):
        """Handle a JOB_ASSIGN_UNIQ packet.

        Adds a WorkerJob to the internal queue to be picked up by any
        threads waiting in :py:meth:`getJob`.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        unique = packet.getArgument(2)
        if unique == b'':
            unique = None
        arguments = packet.getArgument(3, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, unique)

    def _handleJobAssignment(self, packet, handle, name, arguments, unique):
        # Build the job, hand it to a waiting getJob() thread, and
        # update the connection state machines.
        job = self.job_class(handle, name, arguments, unique)
        job.connection = packet.connection
        self.job_lock.acquire()
        try:
            packet.connection.changeState("IDLE")
            self.waiting_for_jobs -= 1
            self.log.debug("Job assigned; number of threads waiting for "
                           "jobs: %s" % self.waiting_for_jobs)
            self.job_queue.put(job)
            self._updateStateMachines()
        finally:
            self.job_lock.release()
class Job(BaseJob):
    """A job to run or being run by Gearman.

    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob to be passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job. Assumed to be utf-8.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes or None)
        The Gearman job handle.  None if no job handle has been received yet.
    **data** (list of byte-arrays)
        The result data returned from Gearman.  Each packet appends an
        element to the list.  Depending on the nature of the data, the
        elements may need to be concatenated before use.  This is returned
        as a snapshot copy of the data to prevent accidental attempts at
        modification which will be lost.
    **exception** (bytes or None)
        Exception information returned from Gearman.  None if no exception
        has been received.
    **warning** (bool)
        Whether the worker has reported a warning.
    **complete** (bool)
        Whether the job is complete.
    **failure** (bool)
        Whether the job has failed.  Only set when complete is True.
    **numerator** (bytes or None)
        The numerator of the completion ratio reported by the worker.
        Only set when a status update is sent by the worker.
    **denominator** (bytes or None)
        The denominator of the completion ratio reported by the
        worker.  Only set when a status update is sent by the worker.
    **fraction_complete** (float or None)
        The fractional complete ratio reported by the worker.  Only set when
        a status update is sent by the worker.
    **known** (bool or None)
        Whether the job is known to Gearman.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **running** (bool or None)
        Whether the job is running.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **connection** (:py:class:`Connection` or None)
        The connection associated with the job.  Only set after the job
        has been submitted to a Gearman server.
    """

    # Subclasses may override this to control the container used for
    # accumulated result data (e.g. TextList in TextJob).
    data_type = list

    def __init__(self, name, arguments, unique=None):
        super(Job, self).__init__(name, arguments, unique)
        # Progress/result state; updated as response packets arrive.
        self._data = self.data_type()
        self._exception = None
        self.warning = False
        self.complete = False
        self.failure = False
        self.numerator = None
        self.denominator = None
        self.fraction_complete = None
        self.known = None
        self.running = None

    @property
    def binary_data(self):
        # Generator yielding every data element encoded to bytes.
        for value in self._data:
            if isinstance(value, six.text_type):
                value = value.encode('utf-8')
            yield value

    @property
    def data(self):
        # NOTE(review): the class docstring says a snapshot copy is
        # returned, but this returns the live list -- confirm which
        # behavior is intended.
        return self._data

    @data.setter
    def data(self, value):
        # Enforce the container type so subclasses that rely on a custom
        # data_type (e.g. TextList's decode-on-append) keep working.
        if not isinstance(value, self.data_type):
            raise ValueError(
                "data attribute must be {}".format(self.data_type))
        self._data = value

    @property
    def exception(self):
        return self._exception

    @exception.setter
    def exception(self, value):
        self._exception = value
class TextJobArguments(object):
    """Assumes utf-8 arguments in addition to name

    If one is always dealing in valid utf-8, using this job class relieves one
    of the need to encode/decode constantly."""

    def _validate_arguments(self, arguments):
        # Any text value is acceptable; encoding happens in the setter.
        pass

    @property
    def arguments(self):
        # Present stored bytes to the caller as text.
        raw = self._arguments
        if not isinstance(raw, six.binary_type):
            return raw
        return raw.decode('utf-8')

    @arguments.setter
    def arguments(self, value):
        # Store as bytes for transmission on the wire.
        if isinstance(value, six.binary_type):
            self._arguments = value
        else:
            self._arguments = value.encode('utf-8')
class TextJobUnique(object):
    """Assumes utf-8 unique

    If one is always dealing in valid utf-8, using this job class relieves one
    of the need to encode/decode constantly."""

    @property
    def unique(self):
        # Present the stored unique ID as text.
        raw = self._unique
        if not isinstance(raw, six.binary_type):
            return raw
        return raw.decode('utf-8')

    @unique.setter
    def unique(self, value):
        # Store as bytes for transmission on the wire.
        if isinstance(value, six.binary_type):
            self._unique = value
        else:
            self._unique = value.encode('utf-8')
class TextList(list):
    """A list that decodes bytes elements to text as they are added.

    Elements added via append/extend/insert are decoded from UTF-8 when
    they arrive as bytes; all other values are stored unchanged.
    """

    def append(self, x):
        if isinstance(x, bytes):
            x = x.decode('utf-8')
        super(TextList, self).append(x)

    def extend(self, iterable):
        def _iter():
            for value in iterable:
                if isinstance(value, bytes):
                    yield value.decode('utf-8')
                else:
                    yield value
        # Bug fix: the generator function must be *called*.  Passing the
        # function object itself made list.extend raise TypeError, so
        # extend() was completely broken.
        super(TextList, self).extend(_iter())

    def insert(self, i, x):
        if isinstance(x, bytes):
            x = x.decode('utf-8')
        super(TextList, self).insert(i, x)
class TextJob(TextJobArguments, TextJobUnique, Job):
    """Sends and receives UTF-8 arguments and data.

    Use this instead of Job when you only expect to send valid UTF-8 through
    gearman. It will automatically encode arguments and work data as UTF-8, and
    any jobs fetched from this worker will have their arguments and data
    decoded assuming they are valid UTF-8, and thus return strings.

    Attributes and method signatures are the same as Job except as noted here:

    **arguments** (str) This will be returned as a string.

    **data** (list of str) This is a TextList: elements arriving as bytes
    are decoded to strings as they are appended.
    """
    # Result data is accumulated in a decode-on-append list.
    data_type = TextList

    @property
    def exception(self):
        # Exceptions arrive from the wire as bytes; present them as text.
        exception = self._exception
        if isinstance(exception, six.binary_type):
            return exception.decode('utf-8')
        return exception

    @exception.setter
    def exception(self, value):
        # Store as bytes for transmission on the wire.
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        self._exception = value
class TextWorkerJob(TextJobArguments, TextJobUnique, WorkerJob):
    """Sends and receives UTF-8 arguments and data.

    See TextJob.  sendWorkData and sendWorkWarning accept strings
    and will encode them as UTF-8.
    """

    @staticmethod
    def _utf8(data):
        # Normalize text payloads to bytes before they hit the wire.
        if isinstance(data, six.text_type):
            return data.encode('utf8')
        return data

    def sendWorkData(self, data=''):
        """Send a WORK_DATA packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkData(self._utf8(data))

    def sendWorkWarning(self, data=''):
        """Send a WORK_WARNING packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkWarning(self._utf8(data))

    def sendWorkComplete(self, data=''):
        """Send a WORK_COMPLETE packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkComplete(self._utf8(data))

    def sendWorkException(self, data=''):
        """Send a WORK_EXCEPTION packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkException(self._utf8(data))
class TextWorker(Worker):
    """A Worker that sends and receives UTF-8 only.

    See TextJob.
    """

    # Jobs handed out by this worker decode/encode UTF-8 automatically.
    job_class = TextWorkerJob
class BaseBinaryJob(object):
    """Mixin for the case where non-utf-8 job names are needed.

    It functions exactly like Job, except that the job name will not be
    decoded.
    """

    @property
    def name(self):
        # Return the stored name untouched (bytes stay bytes).
        return self._name
class BinaryWorkerJob(BaseBinaryJob, WorkerJob):
    """A WorkerJob whose name is left as raw bytes (not utf-8 decoded)."""
    pass
class BinaryJob(BaseBinaryJob, Job):
    """A Job whose name is left as raw bytes (not utf-8 decoded)."""
    pass
# Below are classes for use in the server implementation:
class ServerJob(BinaryJob):
    """A job record for use in a server.

    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob to be passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes or None)
        The Gearman job handle.  None if no job handle has been received yet.
    **data** (list of byte-arrays)
        The result data returned from Gearman.  Each packet appends an
        element to the list.  Depending on the nature of the data, the
        elements may need to be concatenated before use.
    **exception** (bytes or None)
        Exception information returned from Gearman.  None if no exception
        has been received.
    **warning** (bool)
        Whether the worker has reported a warning.
    **complete** (bool)
        Whether the job is complete.
    **failure** (bool)
        Whether the job has failed.  Only set when complete is True.
    **numerator** (bytes or None)
        The numerator of the completion ratio reported by the worker.
        Only set when a status update is sent by the worker.
    **denominator** (bytes or None)
        The denominator of the completion ratio reported by the
        worker.  Only set when a status update is sent by the worker.
    **fraction_complete** (float or None)
        The fractional complete ratio reported by the worker.  Only set when
        a status update is sent by the worker.
    **known** (bool or None)
        Whether the job is known to Gearman.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **running** (bool or None)
        Whether the job is running.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **client_connection** :py:class:`Connection`
        The client connection associated with the job.
    **worker_connection** (:py:class:`Connection` or None)
        The worker connection associated with the job.  Only set after the job
        has been assigned to a worker.
    """

    def __init__(self, handle, name, arguments, client_connection,
                 unique=None):
        super(ServerJob, self).__init__(name, arguments, unique)
        self.handle = handle
        self.client_connection = client_connection
        # Filled in once a worker grabs the job.
        self.worker_connection = None
        # A server-side job tracks client and worker connections
        # separately, so the single inherited 'connection' attribute is
        # removed to prevent accidental use.
        del self.connection
class ServerAdminRequest(AdminRequest):
    """An administrative request sent to a server."""

    def __init__(self, connection):
        super(ServerAdminRequest, self).__init__()
        self.connection = connection

    def isComplete(self, data):
        # A request is complete once a newline arrives; store the command
        # (without the newline) and hand back whatever follows it.
        newline_at = data.find(b'\n')
        if newline_at == -1:
            return (False, None)
        self.command = data[:newline_at]
        remainder = data[newline_at + 1:]
        return (True, remainder)
class NonBlockingConnection(Connection):
    """A Non-blocking connection to a Gearman Client."""

    def __init__(self, host, port, ssl_key=None, ssl_cert=None,
                 ssl_ca=None, client_id='unknown'):
        super(NonBlockingConnection, self).__init__(
            host, port, ssl_key,
            ssl_cert, ssl_ca, client_id)
        # Outgoing byte strings waiting to be written to the socket.
        self.send_queue = []

    def connect(self):
        super(NonBlockingConnection, self).connect()
        # Switch the freshly connected socket to non-blocking mode.
        if self.connected and self.conn:
            self.conn.setblocking(0)

    def _readRawBytes(self, bytes_to_read):
        # Read up to bytes_to_read, translating the "would block"
        # conditions of both SSL and plain sockets into RetryIOError so
        # the caller waits for the next poll event.
        try:
            buff = self.conn.recv(bytes_to_read)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_WANT_READ:
                raise RetryIOError()
            elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                # TLS renegotiation can require a write before the read
                # can complete.
                raise RetryIOError()
            raise
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                # Read operation would block, we're done until
                # epoll flags this connection again
                raise RetryIOError()
            raise
        return buff

    def sendPacket(self, packet):
        """Append a packet to this connection's send queue. The Client or
        Server must manage actually sending the data.

        :arg :py:class:`Packet` packet The packet to send
        """
        self.log.debug("Queuing packet to %s: %s" % (self, packet))
        self.send_queue.append(packet.toBinary())
        self.sendQueuedData()

    def sendRaw(self, data):
        """Append raw data to this connection's send queue. The Client or
        Server must manage actually sending the data.

        :arg bytes data The raw data to send
        """
        self.log.debug("Queuing data to %s: %s" % (self, data))
        self.send_queue.append(data)
        self.sendQueuedData()

    def sendQueuedData(self):
        """Send previously queued data to the socket."""
        try:
            while len(self.send_queue):
                data = self.send_queue.pop(0)
                r = 0
                try:
                    r = self.conn.send(data)
                except ssl.SSLError as e:
                    if e.errno == ssl.SSL_ERROR_WANT_READ:
                        raise RetryIOError()
                    elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                        raise RetryIOError()
                    else:
                        raise
                except socket.error as e:
                    if e.errno == errno.EAGAIN:
                        self.log.debug("Write operation on %s would block"
                                       % self)
                        raise RetryIOError()
                    else:
                        raise
                finally:
                    # Whatever was not sent (everything, if send raised)
                    # goes back to the *front* of the queue so byte order
                    # is preserved across retries.
                    data = data[r:]
                    if data:
                        self.send_queue.insert(0, data)
        except RetryIOError:
            # Socket can't take more right now; remaining data stays
            # queued for the next write-ready event.
            pass
class ServerConnection(NonBlockingConnection):
    """A Connection to a Gearman Client."""

    def __init__(self, addr, conn, use_ssl, client_id):
        if client_id:
            self.log = logging.getLogger("gear.ServerConnection.%s" %
                                         (client_id,))
        else:
            self.log = logging.getLogger("gear.ServerConnection")
        # NOTE(review): the base class __init__ is not called here -- the
        # socket is already accepted/connected, so all connection state
        # is initialized inline.  Confirm this stays in sync with the
        # base class attributes.
        self.send_queue = []        # pending outgoing bytes
        self.admin_requests = []    # in-progress admin protocol requests
        self.host = addr[0]
        self.port = addr[1]
        self.conn = conn
        self.conn.setblocking(0)    # server I/O is epoll-driven
        self.input_buffer = b''
        self.need_bytes = False
        self.use_ssl = use_ssl
        self.client_id = None       # set later via SET_CLIENT_ID
        self.functions = set()      # functions this worker registered
        self.related_jobs = {}      # handle -> ServerJob involving this conn
        self.ssl_subject = None
        if self.use_ssl:
            # Record the peer certificate's commonName; it is used as the
            # identity for ACL checks.
            for x in conn.getpeercert()['subject']:
                if x[0][0] == 'commonName':
                    self.ssl_subject = x[0][1]
            self.log.debug("SSL subject: %s" % self.ssl_subject)
        self.changeState("INIT")

    def _getAdminRequest(self):
        return ServerAdminRequest(self)

    def _putAdminRequest(self, req):
        # The server does not need to keep track of admin requests
        # that have been partially received; it will simply create a
        # new instance the next time it tries to read.
        pass

    def __repr__(self):
        return '<gear.ServerConnection 0x%x name: %s host: %s port: %s>' % (
            id(self), self.client_id, self.host, self.port)
class Server(BaseClientServer):
"""A simple gearman server implementation for testing
(not for production use).
:arg int port: The TCP port on which to listen.
:arg str ssl_key: Path to the SSL private key.
:arg str ssl_cert: Path to the SSL certificate.
:arg str ssl_ca: Path to the CA certificate.
:arg str statsd_host: statsd hostname. None means disabled
(the default).
:arg str statsd_port: statsd port (defaults to 8125).
:arg str statsd_prefix: statsd key prefix.
:arg str client_id: The ID associated with this server.
It will be appending to the name of the logger (e.g.,
gear.Server.server_id). Defaults to None (unused).
:arg ACL acl: An :py:class:`ACL` object if the server should apply
access control rules to its connections.
:arg str host: Host name or IPv4/IPv6 address to bind to. Defaults
to "whatever getaddrinfo() returns", which might be IPv4-only.
:arg bool keepalive: Whether to use TCP keepalives
:arg int tcp_keepidle: Idle time after which to start keepalives sending
:arg int tcp_keepintvl: Interval in seconds between TCP keepalives
:arg int tcp_keepcnt: Count of TCP keepalives to send before disconnect
"""
# epoll event masks.  Edge-triggered (EPOLLET) mode is used, so each
# readiness event must be fully drained (read/write until EAGAIN).
edge_bitmask = select.EPOLLET
error_bitmask = (select.EPOLLERR | select.EPOLLHUP | edge_bitmask)
read_bitmask = (select.EPOLLIN | error_bitmask)
readwrite_bitmask = (select.EPOLLOUT | read_bitmask)
def __init__(self, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None,
             statsd_host=None, statsd_port=8125, statsd_prefix=None,
             server_id=None, acl=None, host=None, keepalive=False,
             tcp_keepidle=7200, tcp_keepintvl=75, tcp_keepcnt=9):
    self.port = port
    self.ssl_key = ssl_key
    self.ssl_cert = ssl_cert
    self.ssl_ca = ssl_ca
    # One queue per job priority; drained high -> normal -> low.
    self.high_queue = []
    self.normal_queue = []
    self.low_queue = []
    self.jobs = {}              # handle -> ServerJob
    self.running_jobs = 0
    self.waiting_jobs = 0
    self.total_jobs = 0
    self.functions = set()      # every function name ever registered
    self.max_handle = 0         # monotonically increasing handle counter
    self.acl = acl
    # Pipe used to wake the accept loop (e.g. at shutdown).
    self.connect_wake_read, self.connect_wake_write = os.pipe()
    self.poll = select.epoll()
    # Reverse mapping of fd -> connection
    self.connection_map = {}
    # SSL is only enabled when key, cert and CA are all supplied.
    self.use_ssl = False
    if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
        self.use_ssl = True
    # Get all valid passive listen addresses, then sort by family to prefer
    # ipv6 if available.
    addrs = socket.getaddrinfo(host, self.port, socket.AF_UNSPEC,
                               socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE |
                               socket.AI_ADDRCONFIG)
    addrs.sort(key=lambda addr: addr[0], reverse=True)
    for res in addrs:
        af, socktype, proto, canonname, sa = res
        try:
            self.socket = socket.socket(af, socktype, proto)
            self.socket.setsockopt(socket.SOL_SOCKET,
                                   socket.SO_REUSEADDR, 1)
            if keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
                self.socket.setsockopt(socket.SOL_SOCKET,
                                       socket.SO_KEEPALIVE, 1)
                self.socket.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_KEEPIDLE, tcp_keepidle)
                self.socket.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_KEEPINTVL, tcp_keepintvl)
                self.socket.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_KEEPCNT, tcp_keepcnt)
            elif keepalive:
                # NOTE(review): self.log is assigned further down, after
                # the socket loop -- this warning may raise
                # AttributeError if reached first.  Confirm.
                self.log.warning('Keepalive requested but not available '
                                 'on this platform')
        except socket.error:
            self.socket = None
            continue
        try:
            self.socket.bind(sa)
            self.socket.listen(1)
        except socket.error:
            self.socket.close()
            self.socket = None
            continue
        # Successfully bound; stop trying further addresses.
        break
    if self.socket is None:
        raise Exception("Could not open socket")
    if port == 0:
        # Port 0 asks the OS for an ephemeral port; record what we got.
        self.port = self.socket.getsockname()[1]
    super(Server, self).__init__(server_id)
    # Register the wake pipe so that we can break if we need to
    # reconfigure connections
    self.poll.register(self.wake_read, self.read_bitmask)
    if server_id:
        self.log = logging.getLogger("gear.Server.%s" % (self.client_id,))
    else:
        self.log = logging.getLogger("gear.Server")
    if statsd_host:
        if not statsd:
            # The statsd module is optional; fall back to no stats.
            self.log.error("Unable to import statsd module")
            self.statsd = None
        else:
            self.statsd = statsd.StatsClient(statsd_host,
                                             statsd_port,
                                             statsd_prefix)
    else:
        self.statsd = None
def _doConnectLoop(self):
    # Outer run method of the accept thread: keep connectLoop alive
    # through unexpected errors, pausing briefly so a persistent
    # failure cannot spin the CPU.
    while self.running:
        try:
            self.connectLoop()
        except Exception:
            self.log.exception("Exception in connect loop:")
            time.sleep(1)
def connectLoop(self):
    """Accept incoming connections until shutdown is requested."""
    poll = select.poll()
    bitmask = (select.POLLIN | select.POLLERR |
               select.POLLHUP | select.POLLNVAL)
    # Register the wake pipe so that we can break if we need to
    # shutdown.
    poll.register(self.connect_wake_read, bitmask)
    poll.register(self.socket.fileno(), bitmask)
    while self.running:
        ret = poll.poll()
        for fd, event in ret:
            if fd == self.connect_wake_read:
                self.log.debug("Accept woken by pipe")
                # Drain the pipe up to the newline, then leave the loop.
                while True:
                    if os.read(self.connect_wake_read, 1) == b'\n':
                        break
                return
            if event & select.POLLIN:
                self.log.debug("Accepting new connection")
                c, addr = self.socket.accept()
                if self.use_ssl:
                    # NOTE(review): a TLSv1-only context is created per
                    # connection; confirm whether newer protocol
                    # versions should be allowed.
                    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_cert_chain(self.ssl_cert, self.ssl_key)
                    context.load_verify_locations(self.ssl_ca)
                    c = context.wrap_socket(c, server_side=True)
                conn = ServerConnection(addr, c, self.use_ssl,
                                        self.client_id)
                self.log.info("Accepted connection %s" % (conn,))
                # Publish the connection under the condition lock so the
                # poll thread sees a consistent view.
                self.connections_condition.acquire()
                try:
                    self.active_connections.append(conn)
                    self._registerConnection(conn)
                    self.connections_condition.notifyAll()
                finally:
                    self.connections_condition.release()
def readFromConnection(self, conn):
    """Drain and dispatch everything currently readable on a connection."""
    while True:
        self.log.debug("Processing input on %s" % conn)
        try:
            req = conn.readPacket()
        except RetryIOError:
            # Read operation would block; nothing more to do until
            # epoll flags this connection again.
            return
        if not req:
            # EOF: the peer went away.
            self.log.debug("Received no data on %s" % conn)
            raise DisconnectError()
        if isinstance(req, Packet):
            self.handlePacket(req)
        else:
            self.handleAdminRequest(req)
def writeToConnection(self, conn):
    # Flush any queued outgoing data for this connection.
    self.log.debug("Processing output on %s" % conn)
    conn.sendQueuedData()
def _processPollEvent(self, conn, event):
    # This should do whatever is necessary to process a connection
    # that has triggered a poll event.  It should generally not
    # raise exceptions so as to avoid restarting the poll loop.
    # The exception handlers here can raise exceptions and if they
    # do, it's okay, the poll loop will be restarted.
    try:
        if event & (select.EPOLLERR | select.EPOLLHUP):
            self.log.debug("Received error event on %s: %s" % (
                conn, event))
            raise DisconnectError()
        # NOTE(review): POLL* constants are tested against epoll event
        # bits here; on Linux POLLIN/POLLOUT coincide with
        # EPOLLIN/EPOLLOUT -- confirm before porting elsewhere.
        if event & (select.POLLIN | select.POLLOUT):
            # Drain readable data first, then flush queued writes.
            self.readFromConnection(conn)
            self.writeToConnection(conn)
    except socket.error as e:
        if e.errno == errno.ECONNRESET:
            self.log.debug("Connection reset by peer: %s" % (conn,))
            self._lostConnection(conn)
            return
        raise
    except DisconnectError:
        # Our inner method says we should quietly drop
        # this connection
        self._lostConnection(conn)
        return
    except Exception:
        self.log.exception("Exception reading or writing "
                           "from %s:" % (conn,))
        self._lostConnection(conn)
        return
def _flushAllConnections(self):
# If we need to restart the poll loop, we need to make sure
# there are no pending data on any connection. Simulate poll
# in+out events on every connection.
#
# If this method raises an exception, the poll loop wil
# restart again.
#
# No need to get the lock since this is called within the poll
# loop and therefore the list in guaranteed never to shrink.
connections = self.active_connections[:]
for conn in connections:
self._processPollEvent(conn, select.POLLIN | select.POLLOUT)
def _doPollLoop(self):
    # Outer run method of poll thread: restart the inner loop on any
    # unexpected error so a single bad event can't kill the server.
    while self.running:
        try:
            self._pollLoop()
        except Exception:
            self.log.exception("Exception in poll loop:")
def _pollLoop(self):
    # Inner method of poll loop.
    self.log.debug("Preparing to poll")
    # Ensure there are no pending data.
    self._flushAllConnections()
    while self.running:
        self.log.debug("Polling %s connections" %
                       len(self.active_connections))
        ret = self.poll.poll()
        # Since we're using edge-triggering, we need to make sure
        # that every file descriptor in 'ret' is processed.
        for fd, event in ret:
            if fd == self.wake_read:
                # This means we're exiting, so we can ignore the
                # rest of 'ret'.
                self.log.debug("Woken by pipe")
                # Drain the wake pipe up to the newline before leaving.
                while True:
                    if os.read(self.wake_read, 1) == b'\n':
                        break
                return
            # In the unlikely event this raises an exception, the
            # loop will be restarted.
            conn = self.connection_map[fd]
            self._processPollEvent(conn, event)
def _shutdown(self):
    super(Server, self)._shutdown()
    # Wake the accept loop so it notices self.running is now False.
    os.write(self.connect_wake_write, b'1\n')
def _cleanup(self):
    super(Server, self)._cleanup()
    self.socket.close()
    # Close both ends of the accept-loop wake pipe.
    os.close(self.connect_wake_read)
    os.close(self.connect_wake_write)
def _registerConnection(self, conn):
# Register the connection with the poll object
# Call while holding the connection condition
self.log.debug("Registering %s" % conn)
self.connection_map[conn.conn.fileno()] = conn
self.poll.register(conn.conn.fileno(), self.readwrite_bitmask)
def _unregisterConnection(self, conn):
# Unregister the connection with the poll object
# Call while holding the connection condition
self.log.debug("Unregistering %s" % conn)
fd = conn.conn.fileno()
if fd not in self.connection_map:
return
try:
self.poll.unregister(fd)
except KeyError:
pass
try:
del self.connection_map[fd]
except KeyError:
pass
def _lostConnection(self, conn):
    # Called as soon as a connection is detected as faulty.  Cleans up
    # poll registration, notifies clients of jobs whose worker vanished,
    # drops all related jobs, and shuts the socket down.
    self.log.info("Marking %s as disconnected" % conn)
    self.connections_condition.acquire()
    self._unregisterConnection(conn)
    try:
        # NOTE(notmorgan): In the loop below it is possible to change the
        # jobs list on the connection. In python 3 .values() is an iter not
        # a static list, meaning that a change will break the for loop
        # as the object being iterated on will have changed in size.
        jobs = list(conn.related_jobs.values())
        if conn in self.active_connections:
            self.active_connections.remove(conn)
    finally:
        self.connections_condition.notifyAll()
        self.connections_condition.release()
    for job in jobs:
        if job.worker_connection == conn:
            # the worker disconnected, alert the client
            try:
                p = Packet(constants.REQ, constants.WORK_FAIL, job.handle)
                if job.client_connection:
                    job.client_connection.sendPacket(p)
            except Exception:
                self.log.exception("Sending WORK_FAIL to client after "
                                   "worker disconnect failed:")
        # Every job related to the lost connection is forgotten.
        self._removeJob(job)
    try:
        conn.conn.shutdown(socket.SHUT_RDWR)
    except socket.error as e:
        # ENOTCONN just means the peer is already gone; anything else
        # is worth logging.
        if e.errno != errno.ENOTCONN:
            self.log.exception("Unable to shutdown socket "
                               "for connection %s" % (conn,))
    except Exception:
        self.log.exception("Unable to shutdown socket "
                           "for connection %s" % (conn,))
    try:
        conn.conn.close()
    except Exception:
        self.log.exception("Unable to close socket "
                           "for connection %s" % (conn,))
    self._updateStats()
def _removeJob(self, job, dequeue=True):
# dequeue is tri-state: True, False, or a specific queue
if job.client_connection:
try:
del job.client_connection.related_jobs[job.handle]
except KeyError:
pass
if job.worker_connection:
try:
del job.worker_connection.related_jobs[job.handle]
except KeyError:
pass
try:
del self.jobs[job.handle]
except KeyError:
pass
if dequeue is True:
# Search all queues for the job
try:
self.high_queue.remove(job)
except ValueError:
pass
try:
self.normal_queue.remove(job)
except ValueError:
pass
try:
self.low_queue.remove(job)
except ValueError:
pass
elif dequeue is not False:
# A specific queue was supplied
dequeue.remove(job)
# If dequeue is false, no need to remove from any queue
self.total_jobs -= 1
if job.running:
self.running_jobs -= 1
else:
self.waiting_jobs -= 1
def getQueue(self):
    """Returns a copy of all internal queues in a flattened form.

    :returns: The Gearman queue.
    :rtype: list of :py:class:`ServerJob`.
    """
    # Concatenation yields a new list, so callers cannot mutate the
    # internal queues through the return value.
    return self.high_queue + self.normal_queue + self.low_queue
def handleAdminRequest(self, request):
    """Dispatch an administrative request to its handler by prefix."""
    self.log.info("Received admin request %s" % (request,))
    # First matching prefix wins; order mirrors the old if/elif chain.
    dispatch = ((b'cancel job', self.handleCancelJob),
                (b'status', self.handleStatus),
                (b'workers', self.handleWorkers),
                (b'acl list', self.handleACLList),
                (b'acl grant', self.handleACLGrant),
                (b'acl revoke', self.handleACLRevoke),
                (b'acl self-revoke', self.handleACLSelfRevoke))
    for prefix, handler in dispatch:
        if request.command.startswith(prefix):
            handler(request)
            break
    self.log.debug("Finished handling admin request %s" % (request,))
def _cancelJob(self, request, job, queue):
if self.acl:
if not self.acl.canInvoke(request.connection.ssl_subject,
job.name):
self.log.info("Rejecting cancel job from %s for %s "
"due to ACL" %
(request.connection.ssl_subject, job.name))
request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
return
self._removeJob(job, dequeue=queue)
self._updateStats()
request.connection.sendRaw(b'OK\n')
return
def handleCancelJob(self, request):
    # Command form: "cancel job <handle>".
    handle = request.command.split()[2]
    if handle in self.jobs:
        # Locate the queue that still holds the job so _cancelJob can
        # dequeue it directly.
        for queue in (self.high_queue, self.normal_queue, self.low_queue):
            for job in queue:
                if job.handle == handle:
                    return self._cancelJob(request, job, queue)
    request.connection.sendRaw(b'ERR UNKNOWN_JOB\n')
def handleACLList(self, request):
    """Send every ACL entry, one per line, terminated by a '.' line."""
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    for entry in self.acl.getEntries():
        line = "%s\tregister=%s\tinvoke=%s\tgrant=%s\n" % (
            entry.subject, entry.register, entry.invoke, entry.grant)
        request.connection.sendRaw(line.encode('utf8'))
    request.connection.sendRaw(b'.\n')
def handleACLGrant(self, request):
    """Handle an "acl grant" admin command.

    Command form: "acl grant <verb> <subject> [function]", where verb
    is one of invoke, register or grant.
    """
    # Bug fix: request.command is bytes on Python 3, so the verb
    # comparisons against str literals below never matched and every
    # grant was rejected as an unknown verb.  Decode first (on Python 2
    # bytes is str, so this is a no-op for ascii commands).
    command = request.command
    if isinstance(command, bytes):
        command = command.decode('utf8')
    words = command.split(None, 4)
    verb = words[2]
    subject = words[3]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    if not self.acl.canGrant(request.connection.ssl_subject):
        request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
        return
    try:
        if verb == 'invoke':
            self.acl.grantInvoke(subject, words[4])
        elif verb == 'register':
            self.acl.grantRegister(subject, words[4])
        elif verb == 'grant':
            self.acl.grantGrant(subject)
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in grant command: %s" % (e.message,))
        # Bug fix: format as text then encode; b'%s' with a str argument
        # raises TypeError on Python 3.
        request.connection.sendRaw(
            ('ERR UNABLE %s\n' % (e.message,)).encode('utf8'))
        return
    request.connection.sendRaw(b'OK\n')
def handleACLRevoke(self, request):
    """Handle an "acl revoke" admin command.

    Command form: "acl revoke <verb> <subject>", where verb is one of
    invoke, register, grant or all.  Revoking another subject's
    privileges requires grant permission.
    """
    # Bug fix: request.command is bytes on Python 3, so the verb
    # comparisons below never matched and the subject never compared
    # equal to the (str) SSL subject.  Decode first.
    command = request.command
    if isinstance(command, bytes):
        command = command.decode('utf8')
    words = command.split()
    verb = words[2]
    subject = words[3]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    if subject != request.connection.ssl_subject:
        # Only holders of grant permission may revoke others' rights.
        if not self.acl.canGrant(request.connection.ssl_subject):
            request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
            return
    try:
        if verb == 'invoke':
            self.acl.revokeInvoke(subject)
        elif verb == 'register':
            self.acl.revokeRegister(subject)
        elif verb == 'grant':
            self.acl.revokeGrant(subject)
        elif verb == 'all':
            try:
                self.acl.remove(subject)
            except ACLError:
                pass
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in revoke command: %s" % (e.message,))
        # Bug fix: format as text then encode; b'%s' with a str argument
        # raises TypeError on Python 3.
        request.connection.sendRaw(
            ('ERR UNABLE %s\n' % (e.message,)).encode('utf8'))
        return
    request.connection.sendRaw(b'OK\n')
def handleACLSelfRevoke(self, request):
    """Handle an "acl self-revoke" admin command.

    Command form: "acl self-revoke <verb>", where verb is one of
    invoke, register, grant or all.  The subject is always the
    requester's own SSL certificate subject.
    """
    # Bug fix: request.command is bytes on Python 3, so the verb
    # comparisons against str literals below never matched and every
    # self-revoke was rejected as an unknown verb.  Decode first.
    command = request.command
    if isinstance(command, bytes):
        command = command.decode('utf8')
    words = command.split()
    verb = words[2]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    subject = request.connection.ssl_subject
    try:
        if verb == 'invoke':
            self.acl.revokeInvoke(subject)
        elif verb == 'register':
            self.acl.revokeRegister(subject)
        elif verb == 'grant':
            self.acl.revokeGrant(subject)
        elif verb == 'all':
            try:
                self.acl.remove(subject)
            except ACLError:
                pass
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in self-revoke command: %s" % (e.message,))
        # Bug fix: format as text then encode; b'%s' with a str argument
        # raises TypeError on Python 3.
        request.connection.sendRaw(
            ('ERR UNABLE %s\n' % (e.message,)).encode('utf8'))
        return
    request.connection.sendRaw(b'OK\n')
def _getFunctionStats(self):
functions = {}
for function in self.functions:
# Total, running, workers
functions[function] = [0, 0, 0]
for job in self.jobs.values():
if job.name not in functions:
functions[job.name] = [0, 0, 0]
functions[job.name][0] += 1
if job.running:
functions[job.name][1] += 1
for connection in self.active_connections:
for function in connection.functions:
if function not in functions:
functions[function] = [0, 0, 0]
functions[function][2] += 1
return functions
def handleStatus(self, request):
    """Send "status" output: name, total, running and workers per line."""
    for name, (total, running, workers) in self._getFunctionStats().items():
        line = "%s\t%s\t%s\t%s\n" % (name.decode('utf-8'),
                                     total, running, workers)
        request.connection.sendRaw(line.encode('utf8'))
    request.connection.sendRaw(b'.\n')
def handleWorkers(self, request):
    """Send "workers" output: fd, ip, client id and functions per line."""
    for connection in self.active_connections:
        line = "%s %s %s : %s\n" % (
            connection.conn.fileno(),
            connection.host,
            # A connection that never sent SET_CLIENT_ID shows as '-'.
            (connection.client_id or b'-').decode('utf8'),
            b' '.join(connection.functions).decode('utf8'))
        request.connection.sendRaw(line.encode('utf8'))
    request.connection.sendRaw(b'.\n')
def wakeConnection(self, connection):
    """Send a NOOP to a single sleeping worker connection to wake it."""
    if connection.state != 'SLEEP':
        return
    connection.changeState("AWAKE")
    connection.sendPacket(Packet(constants.RES, constants.NOOP, b''))
def wakeConnections(self, job=None):
    """Wake sleeping workers with a NOOP packet.

    With a job, wake only workers registered for that job's function;
    without one, wake every sleeping worker.
    """
    p = Packet(constants.RES, constants.NOOP, b'')
    for connection in self.active_connections:
        if connection.state != 'SLEEP':
            continue
        if job is None or job.name in connection.functions:
            connection.changeState("AWAKE")
            connection.sendPacket(p)
def reportTimingStats(self, ptype, duration):
    """Report processing times by packet type

    This method is called by handlePacket to report how long
    processing took for each packet.  If statsd is configured,
    timing and counts are reported with the key
    "prefix.packet.NAME".

    :arg bytes ptype: The packet type (one of the packet types in
        constants).
    :arg float duration: The time (in seconds) it took to process
        the packet.
    """
    if not self.statsd:
        return
    key = 'packet.%s' % constants.types.get(ptype, 'UNKNOWN')
    self.statsd.timing(key, int(duration * 1000))
    self.statsd.incr(key)
def _updateStats(self):
if not self.statsd:
return
# prefix.queue.total
# prefix.queue.running
# prefix.queue.waiting
self.statsd.gauge('queue.total', self.total_jobs)
self.statsd.gauge('queue.running', self.running_jobs)
self.statsd.gauge('queue.waiting', self.waiting_jobs)
def _handleSubmitJob(self, packet, precedence, background=False):
    """Create a ServerJob from a SUBMIT_JOB* packet and enqueue it.

    :arg Packet packet: the submission packet.
    :arg precedence: one of the PRECEDENCE_* constants selecting which
        priority queue receives the job.
    :arg bool background: background jobs keep no client connection,
        so no results flow back to the submitter.
    """
    name = packet.getArgument(0)
    unique = packet.getArgument(1)
    # An empty unique field means "no unique ID".
    if not unique:
        unique = None
    arguments = packet.getArgument(2, True)
    if self.acl:
        if not self.acl.canInvoke(packet.connection.ssl_subject, name):
            self.log.info("Rejecting SUBMIT_JOB from %s for %s "
                          "due to ACL" %
                          (packet.connection.ssl_subject, name))
            self.sendError(packet.connection, 0,
                           'Permission denied by ACL')
            return
    # Handles are unique per server: H:<client host>:<counter>.
    self.max_handle += 1
    handle = ('H:%s:%s' % (packet.connection.host,
                           self.max_handle)).encode('utf8')
    if not background:
        conn = packet.connection
    else:
        conn = None
    job = ServerJob(handle, name, arguments, conn, unique)
    # Acknowledge the submission before queueing.
    p = Packet(constants.RES, constants.JOB_CREATED, handle)
    packet.connection.sendPacket(p)
    self.jobs[handle] = job
    self.total_jobs += 1
    self.waiting_jobs += 1
    if not background:
        packet.connection.related_jobs[handle] = job
    if precedence == PRECEDENCE_HIGH:
        self.high_queue.append(job)
    elif precedence == PRECEDENCE_NORMAL:
        self.normal_queue.append(job)
    elif precedence == PRECEDENCE_LOW:
        self.low_queue.append(job)
    self._updateStats()
    # Wake any sleeping workers registered for this function.
    self.wakeConnections(job)
def handleSubmitJob(self, packet):
    # Foreground submission at normal priority.
    return self._handleSubmitJob(packet, PRECEDENCE_NORMAL)
def handleSubmitJobHigh(self, packet):
return self._handleSubmitJob(packet, PRECEDENCE_HIGH)
def handleSubmitJobLow(self, packet):
return self._handleSubmitJob(packet, PRECEDENCE_LOW)
def handleSubmitJobBg(self, packet):
return self._handleSubmitJob(packet, PRECEDENCE_NORMAL,
background=True)
def handleSubmitJobHighBg(self, packet):
return self._handleSubmitJob(packet, PRECEDENCE_HIGH, background=True)
def handleSubmitJobLowBg(self, packet):
return self._handleSubmitJob(packet, PRECEDENCE_LOW, background=True)
def getJobForConnection(self, connection, peek=False):
for queue in [self.high_queue, self.normal_queue, self.low_queue]:
for job in queue:
if job.name in connection.functions:
if not peek:
queue.remove(job)
connection.related_jobs[job.handle] = job
job.worker_connection = connection
job.running = True
self.waiting_jobs -= 1
self.running_jobs += 1
self._updateStats()
return job
return None
def handleGrabJobUniq(self, packet):
job = self.getJobForConnection(packet.connection)
if job:
self.sendJobAssignUniq(packet.connection, job)
else:
self.sendNoJob(packet.connection)
def sendJobAssignUniq(self, connection, job):
unique = job.binary_unique
if not unique:
unique = b''
data = b'\x00'.join((job.handle, job.name, unique, job.arguments))
p = Packet(constants.RES, constants.JOB_ASSIGN_UNIQ, data)
connection.sendPacket(p)
def sendNoJob(self, connection):
p = Packet(constants.RES, constants.NO_JOB, b'')
connection.sendPacket(p)
def handlePreSleep(self, packet):
packet.connection.changeState("SLEEP")
if self.getJobForConnection(packet.connection, peek=True):
self.wakeConnection(packet.connection)
def handleWorkComplete(self, packet):
self.handlePassthrough(packet, True)
def handleWorkFail(self, packet):
self.handlePassthrough(packet, True)
def handleWorkException(self, packet):
self.handlePassthrough(packet, True)
def handleWorkData(self, packet):
self.handlePassthrough(packet)
def handleWorkWarning(self, packet):
self.handlePassthrough(packet)
def handleWorkStatus(self, packet):
handle = packet.getArgument(0)
job = self.jobs.get(handle)
if not job:
self.log.info("Received packet %s for unknown job" % (packet,))
return
job.numerator = packet.getArgument(1)
job.denominator = packet.getArgument(2)
self.handlePassthrough(packet)
def handlePassthrough(self, packet, finished=False):
handle = packet.getArgument(0)
job = self.jobs.get(handle)
if not job:
self.log.info("Received packet %s for unknown job" % (packet,))
return
packet.code = constants.RES
if job.client_connection:
job.client_connection.sendPacket(packet)
if finished:
self._removeJob(job, dequeue=False)
self._updateStats()
def handleSetClientID(self, packet):
name = packet.getArgument(0)
packet.connection.client_id = name
def sendError(self, connection, code, text):
data = (str(code).encode('utf8') + b'\x00' +
str(text).encode('utf8') + b'\x00')
p = Packet(constants.RES, constants.ERROR, data)
connection.sendPacket(p)
def handleCanDo(self, packet):
name = packet.getArgument(0)
if self.acl:
if not self.acl.canRegister(packet.connection.ssl_subject, name):
self.log.info("Ignoring CAN_DO from %s for %s due to ACL" %
(packet.connection.ssl_subject, name))
# CAN_DO normally does not merit a response so it is
# not clear that it is appropriate to send an ERROR
# response at this point.
return
self.log.debug("Adding function %s to %s" % (name, packet.connection))
packet.connection.functions.add(name)
self.functions.add(name)
def handleCantDo(self, packet):
name = packet.getArgument(0)
self.log.debug("Removing function %s from %s" %
(name, packet.connection))
packet.connection.functions.remove(name)
def handleResetAbilities(self, packet):
self.log.debug("Resetting functions for %s" % packet.connection)
packet.connection.functions = set()
def handleGetStatus(self, packet):
handle = packet.getArgument(0)
self.log.debug("Getting status for %s" % handle)
known = 0
running = 0
numerator = b''
denominator = b''
job = self.jobs.get(handle)
if job:
known = 1
if job.running:
running = 1
numerator = job.numerator or b''
denominator = job.denominator or b''
data = (handle + b'\x00' +
str(known).encode('utf8') + b'\x00' +
str(running).encode('utf8') + b'\x00' +
numerator + b'\x00' +
denominator)
p = Packet(constants.RES, constants.STATUS_RES, data)
packet.connection.sendPacket(p)
| 36.094698 | 79 | 0.584395 |
import errno
import logging
import os
import select
import six
import socket
import ssl
import struct
import threading
import time
import uuid as uuid_module
from gear import constants
from gear.acl import ACLError, ACLEntry, ACL
try:
import Queue as queue_mod
except ImportError:
import queue as queue_mod
try:
import statsd
except ImportError:
statsd = None
# Job precedence levels used when queueing submissions; the numeric
# values are internal bookkeeping and never appear on the wire.
PRECEDENCE_NORMAL = 0
PRECEDENCE_LOW = 1
PRECEDENCE_HIGH = 2
class ConnectionError(Exception):
    """Raised when a socket to a Gearman server cannot be opened."""
    # NOTE(review): shadows the builtin ConnectionError (Python 3.3+)
    # within this module.
    pass
class InvalidDataError(Exception):
    """Raised on malformed packet data or a duplicate echo request."""
    pass
class ConfigurationError(Exception):
    """Raised on invalid configuration (duplicate server, bad precedence)."""
    pass
class NoConnectedServersError(Exception):
    """Raised when an operation needs a server but none is connected."""
    pass
class UnknownJobError(Exception):
    """Raised when a packet references a job handle that is not tracked."""
    pass
class InterruptedError(Exception):
    """Error signifying an interrupted operation."""
    # NOTE(review): not raised within this chunk; presumably part of the
    # public API used elsewhere.  Shadows the builtin InterruptedError
    # (Python 3.3+).
    pass
class TimeoutError(Exception):
    """Raised when a blocking wait (echo, admin request, etc.) times out."""
    # NOTE(review): shadows the builtin TimeoutError (Python 3.3+).
    pass
class GearmanError(Exception):
    """Raised on protocol-level failures (e.g. job submission rejected)."""
    pass
class DisconnectError(Exception):
    """Error signifying that a connection was lost."""
    # NOTE(review): not raised within this chunk; presumably used by
    # job/worker code elsewhere in the file.
    pass
class RetryIOError(Exception):
    """Error signifying that an I/O operation should be retried."""
    # Caught in Connection.readPacket; presumably raised by a
    # non-blocking read implementation elsewhere — TODO confirm.
    pass
def convert_to_bytes(data):
    """Return *data* UTF-8 encoded when it is text.

    Anything without an ``encode`` method (bytes, ints, None, ...) is
    returned unchanged.
    """
    try:
        return data.encode('utf8')
    except AttributeError:
        return data
class Task(object):
    """A waitable handle for a request that expects a server response."""

    def __init__(self):
        self._wait_event = threading.Event()

    def setComplete(self):
        """Mark the task finished, releasing any waiters."""
        self._wait_event.set()

    def wait(self, timeout=None):
        """Block until the task completes or *timeout* seconds elapse.

        :returns: True when the task has completed, False on timeout.
        """
        self._wait_event.wait(timeout)
        return self._wait_event.is_set()
class SubmitJobTask(Task):
    """Task tracking a SUBMIT_JOB request until JOB_CREATED arrives."""

    def __init__(self, job):
        super(SubmitJobTask, self).__init__()
        # The client-side job object this submission is for.
        self.job = job
class OptionReqTask(Task):
    """Task tracking an OPTION_REQ request until OPTION_RES arrives."""
    pass
class Connection(object):
    """A single TCP (optionally TLS) connection to a Gearman server.

    Tracks socket state, buffered input, jobs related to this
    connection, pending tasks/admin requests, and per-connection
    synchronization primitives.  Instances are managed by
    BaseClientServer and its subclasses.
    """

    def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None,
                 client_id='unknown', keepalive=False, tcp_keepidle=7200,
                 tcp_keepintvl=75, tcp_keepcnt=9):
        self.log = logging.getLogger("gear.Connection.%s" % (client_id,))
        self.host = host
        self.port = port
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.ssl_ca = ssl_ca
        self.keepalive = keepalive
        self.tcp_keepcnt = tcp_keepcnt
        self.tcp_keepintvl = tcp_keepintvl
        self.tcp_keepidle = tcp_keepidle
        # TLS is only enabled when key, cert and CA are all supplied.
        self.use_ssl = False
        if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
            self.use_ssl = True
        self.input_buffer = b''
        self.need_bytes = False
        self.echo_lock = threading.Lock()
        self.send_lock = threading.Lock()
        self._init()

    def _init(self):
        # Reset all per-connection state; called on construction and
        # after every disconnect.
        self.conn = None
        self.connected = False
        self.connect_time = None
        self.related_jobs = {}
        self.pending_tasks = []
        self.admin_requests = []
        self.echo_conditions = {}
        self.options = set()
        self.changeState("INIT")

    def changeState(self, state):
        # The state name is informational for higher layers; nothing in
        # the connection object itself acts on it except to reset to
        # "INIT" immediately after reconnection.
        self.log.debug("Setting state to: %s" % state)
        self.state = state
        self.state_time = time.time()

    def __repr__(self):
        return '<gear.Connection 0x%x host: %s port: %s>' % (
            id(self), self.host, self.port)

    def connect(self):
        """Open a socket to the server, trying each resolved address.

        Wraps the socket in TLS when use_ssl is set and enables TCP
        keepalive when requested.  Raises ConnectionError when no
        address could be connected.
        """
        self.log.debug("Connecting to %s port %s" % (self.host, self.port))
        s = None
        for res in socket.getaddrinfo(self.host, self.port,
                                      socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
                if self.keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
                    s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                 self.tcp_keepidle)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                                 self.tcp_keepintvl)
                    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT,
                                 self.tcp_keepcnt)
                elif self.keepalive:
                    self.log.warning('Keepalive requested but not available '
                                     'on this platform')
            except socket.error:
                s = None
                continue
            if self.use_ssl:
                self.log.debug("Using SSL")
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_REQUIRED
                context.check_hostname = False
                context.load_cert_chain(self.ssl_cert, self.ssl_key)
                context.load_verify_locations(self.ssl_ca)
                s = context.wrap_socket(s, server_hostname=self.host)
            try:
                s.connect(sa)
            except socket.error:
                s.close()
                s = None
                continue
            break
        if s is None:
            self.log.debug("Error connecting to %s port %s" % (
                self.host, self.port))
            raise ConnectionError("Unable to open socket")
        self.log.info("Connected to %s port %s" % (self.host, self.port))
        self.conn = s
        self.connected = True
        self.connect_time = time.time()
        self.input_buffer = b''
        self.need_bytes = False

    def disconnect(self):
        """Close the socket (best-effort) and reset connection state."""
        if self.conn:
            try:
                self.conn.close()
            except Exception:
                pass
        self.log.info("Disconnected from %s port %s" % (self.host, self.port))
        self._init()

    def reconnect(self):
        """Disconnect (if connected) and connect again."""
        self.disconnect()
        self.connect()

    def sendRaw(self, data):
        """Send raw bytes, looping until the whole buffer is written.

        :arg bytes data: The bytes to send.
        """
        with self.send_lock:
            sent = 0
            while sent < len(data):
                try:
                    # send() may write only part of the buffer; resume
                    # from the first unsent byte, not from the start.
                    sent += self.conn.send(data[sent:])
                except ssl.SSLError as e:
                    if e.errno == ssl.SSL_ERROR_WANT_READ:
                        continue
                    elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                        continue
                    else:
                        raise

    def sendPacket(self, packet):
        """Serialize *packet* and send it over this connection."""
        self.log.info("Sending packet to %s: %s" % (self, packet))
        self.sendRaw(packet.toBinary())

    def _getAdminRequest(self):
        # Admin responses arrive in the order the requests were sent.
        return self.admin_requests.pop(0)

    def _readRawBytes(self, bytes_to_read):
        # Read up to bytes_to_read bytes, retrying on SSL renegotiation.
        while True:
            try:
                buff = self.conn.recv(bytes_to_read)
            except ssl.SSLError as e:
                if e.errno == ssl.SSL_ERROR_WANT_READ:
                    continue
                elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                    continue
                else:
                    raise
            break
        return buff

    def _putAdminRequest(self, req):
        # Push an admin request back to the head of the queue (used when
        # a non-blocking read must be retried).
        self.admin_requests.insert(0, req)

    def readPacket(self):
        """Read one binary Packet or one admin response from the socket.

        Partial reads are buffered on the connection so this handles
        non-blocking or blocking IO.  Returns None when the peer closed
        the connection.
        """
        datalen = 0
        code = None
        ptype = None
        admin = None
        admin_request = None
        need_bytes = self.need_bytes
        raw_bytes = self.input_buffer
        try:
            while True:
                try:
                    if not raw_bytes or need_bytes:
                        segment = self._readRawBytes(4096)
                        if not segment:
                            # This occurs when the connection is closed. The
                            # connect method will reset input_buffer and
                            # need_bytes for us.
                            return None
                        raw_bytes += segment
                        need_bytes = False
                except RetryIOError:
                    if admin_request:
                        self._putAdminRequest(admin_request)
                    raise
                if admin is None:
                    # Binary packets always start with a NUL magic byte;
                    # anything else is a text admin response.
                    if raw_bytes[0:1] == b'\x00':
                        admin = False
                    else:
                        admin = True
                        admin_request = self._getAdminRequest()
                if admin:
                    complete, remainder = admin_request.isComplete(raw_bytes)
                    if remainder is not None:
                        raw_bytes = remainder
                    if complete:
                        return admin_request
                else:
                    length = len(raw_bytes)
                    if code is None and length >= 12:
                        code, ptype, datalen = struct.unpack('!4sii',
                                                             raw_bytes[:12])
                    if length >= datalen + 12:
                        end = 12 + datalen
                        p = Packet(code, ptype, raw_bytes[12:end],
                                   connection=self)
                        raw_bytes = raw_bytes[end:]
                        return p
                # If we don't return a packet above then we need more data
                need_bytes = True
        finally:
            self.input_buffer = raw_bytes
            self.need_bytes = need_bytes

    def hasPendingData(self):
        """Return True when buffered input is waiting to be parsed."""
        return self.input_buffer != b''

    def sendAdminRequest(self, request, timeout=90):
        """Send an admin request and wait for its response.

        :raises TimeoutError: when no complete response arrives within
            *timeout* seconds.
        """
        self.admin_requests.append(request)
        self.sendRaw(request.getCommand())
        complete = request.waitForResponse(timeout)
        if not complete:
            raise TimeoutError()

    def echo(self, data=None, timeout=30):
        """Round-trip *data* through the server via ECHO_REQ/ECHO_RES.

        :arg bytes data: Payload to echo; a random one is generated when
            omitted.
        :returns: The echoed data on success.
        :raises TimeoutError: when no response arrives within *timeout*.
        """
        if data is None:
            data = uuid_module.uuid4().hex.encode('utf8')
        self.echo_lock.acquire()
        try:
            if data in self.echo_conditions:
                raise InvalidDataError("This client is already waiting on an "
                                       "echo response of: %s" % data)
            condition = threading.Condition()
            self.echo_conditions[data] = condition
        finally:
            self.echo_lock.release()
        self.sendEchoReq(data)
        condition.acquire()
        condition.wait(timeout)
        condition.release()
        # handleEchoRes removes our entry when the response arrives, so
        # its continued presence means the wait timed out.
        if data in self.echo_conditions:
            self.echo_lock.acquire()
            try:
                self.echo_conditions.pop(data, None)
            finally:
                self.echo_lock.release()
            raise TimeoutError()
        return data

    def sendEchoReq(self, data):
        """Send an ECHO_REQ packet carrying *data*."""
        p = Packet(constants.REQ, constants.ECHO_REQ, data)
        self.sendPacket(p)

    def handleEchoRes(self, data):
        """Wake the waiter registered for an echo of *data*.

        :returns: False when no waiter was registered, True otherwise.
        """
        condition = None
        self.echo_lock.acquire()
        try:
            condition = self.echo_conditions.get(data)
            if condition:
                del self.echo_conditions[data]
        finally:
            self.echo_lock.release()
        if not condition:
            return False
        condition.notifyAll()
        return True

    def handleOptionRes(self, option):
        """Record a server-acknowledged option on this connection."""
        self.options.add(option)
class AdminRequest(object):
    """An administrative request to a Gearman server.

    Admin requests use the line-oriented text protocol.  Responses are
    normally a block of lines terminated by ``.\\n`` (or CRLF variants);
    subclasses override :py:meth:`isComplete` for single-line responses.
    This class must be subclassed.
    """

    command = None          # bytes command verb, set by subclasses
    arguments = []
    response = None         # raw response bytes once complete
    _complete_position = 0  # how far the terminator scan has progressed

    def __init__(self, *arguments):
        self.wait_event = threading.Event()
        self.arguments = arguments
        if type(self) == AdminRequest:
            raise NotImplementedError("AdminRequest must be subclassed")

    def __repr__(self):
        return '<gear.AdminRequest 0x%x command: %s>' % (
            id(self), self.command)

    def getCommand(self):
        """Return the full command line: verb, arguments, newline."""
        cmd = self.command
        if self.arguments:
            cmd += b' ' + b' '.join(self.arguments)
        cmd += b'\n'
        return cmd

    def isComplete(self, data):
        """Check whether *data* contains a complete response.

        :returns: ``(True, remainder)`` with the bytes after the
            terminator when complete, otherwise ``(False, None)``.
        """
        x = -1
        # Resume a few bytes before where the previous scan stopped so a
        # terminator split across two reads is still found.
        start = max(self._complete_position - 4, 0)
        end_index_newline = data.find(b'\n.\n', start)
        end_index_return = data.find(b'\r\n.\r\n', start)
        if end_index_newline != -1:
            x = end_index_newline + 3
        elif end_index_return != -1:
            x = end_index_return + 5
        elif data.startswith(b'.\n'):
            x = 2
        elif data.startswith(b'.\r\n'):
            x = 3
        self._complete_position = len(data)
        if x != -1:
            self.response = data[:x]
            return (True, data[x:])
        else:
            return (False, None)

    def setComplete(self):
        """Mark the request answered, releasing any waiters."""
        self.wait_event.set()

    def waitForResponse(self, timeout=None):
        """Wait up to *timeout* seconds; return True when answered."""
        self.wait_event.wait(timeout)
        return self.wait_event.is_set()
class StatusAdminRequest(AdminRequest):
    """An admin request for the 'status' command."""
    command = b'status'

    def __init__(self):
        super(StatusAdminRequest, self).__init__()
class ShowJobsAdminRequest(AdminRequest):
    """An admin request for the 'show jobs' command."""
    command = b'show jobs'

    def __init__(self):
        super(ShowJobsAdminRequest, self).__init__()
class ShowUniqueJobsAdminRequest(AdminRequest):
    """An admin request for the 'show unique jobs' command."""
    command = b'show unique jobs'

    def __init__(self):
        super(ShowUniqueJobsAdminRequest, self).__init__()
class CancelJobAdminRequest(AdminRequest):
    """An admin request to cancel a queued job by handle.

    Unlike most admin commands, the response is a single line rather
    than a '.'-terminated block.
    """

    command = b'cancel job'

    def __init__(self, handle):
        handle = convert_to_bytes(handle)
        super(CancelJobAdminRequest, self).__init__(handle)

    def isComplete(self, data):
        """Complete once a full (newline-terminated) line has arrived."""
        line, newline, remainder = data.partition(b'\n')
        if newline:
            self.response = line + newline
            return (True, remainder)
        return (False, None)
class VersionAdminRequest(AdminRequest):
    """An admin request for the server version.

    The response is a single line rather than a '.'-terminated block.
    """

    command = b'version'

    def __init__(self):
        super(VersionAdminRequest, self).__init__()

    def isComplete(self, data):
        """Complete once a full (newline-terminated) line has arrived."""
        line, newline, remainder = data.partition(b'\n')
        if newline:
            self.response = line + newline
            return (True, remainder)
        return (False, None)
class WorkersAdminRequest(AdminRequest):
    """An admin request for the 'workers' command."""
    command = b'workers'

    def __init__(self):
        super(WorkersAdminRequest, self).__init__()
class Packet(object):
    """A single unit of the Gearman binary protocol.

    Wire format is a 12-byte header (4-byte magic code, packet type,
    payload length, all big-endian) followed by the payload, whose
    fields are NUL separated.
    """

    def __init__(self, code, ptype, data, connection=None):
        """
        :arg bytes code: The magic code (\\x00REQ or \\x00RES); must start
            with a NUL byte.
        :arg int ptype: The packet type (one of the types in constants).
        :arg bytes data: The payload.
        :arg Connection connection: The connection the packet was
            received on, if any.
        :raises TypeError: when code or data is not bytes/bytearray.
        :raises InvalidDataError: when code does not start with NUL.
        """
        if not isinstance(code, bytes) and not isinstance(code, bytearray):
            raise TypeError("code must be of type bytes or bytearray")
        if code[0:1] != b'\x00':
            raise InvalidDataError("First byte of packet must be 0")
        self.code = code
        self.ptype = ptype
        if not isinstance(data, bytes) and not isinstance(data, bytearray):
            raise TypeError("data must be of type bytes or bytearray")
        self.data = data
        self.connection = connection

    def __repr__(self):
        ptype = constants.types.get(self.ptype, 'UNKNOWN')
        try:
            extra = self._formatExtraData()
        except Exception:
            extra = ''
        return '<gear.Packet 0x%x type: %s%s>' % (id(self), ptype, extra)

    def __eq__(self, other):
        # Equality compares wire content only, not the connection.
        if not isinstance(other, Packet):
            return False
        if (self.code == other.code and
            self.ptype == other.ptype and
            self.data == other.data):
            return True
        return False

    def __ne__(self, other):
        # Needed for Python 2 compatibility (the file supports both).
        return not self.__eq__(other)

    def _formatExtraData(self):
        # Render type-specific payload fields for __repr__.
        if self.ptype in [constants.JOB_CREATED,
                          constants.JOB_ASSIGN,
                          constants.GET_STATUS,
                          constants.STATUS_RES,
                          constants.WORK_STATUS,
                          constants.WORK_COMPLETE,
                          constants.WORK_FAIL,
                          constants.WORK_EXCEPTION,
                          constants.WORK_DATA,
                          constants.WORK_WARNING]:
            return ' handle: %s' % self.getArgument(0)
        if self.ptype == constants.JOB_ASSIGN_UNIQ:
            return (' handle: %s function: %s unique: %s' %
                    (self.getArgument(0),
                     self.getArgument(1),
                     self.getArgument(2)))
        if self.ptype in [constants.SUBMIT_JOB,
                          constants.SUBMIT_JOB_BG,
                          constants.SUBMIT_JOB_HIGH,
                          constants.SUBMIT_JOB_HIGH_BG,
                          constants.SUBMIT_JOB_LOW,
                          constants.SUBMIT_JOB_LOW_BG,
                          constants.SUBMIT_JOB_SCHED,
                          constants.SUBMIT_JOB_EPOCH]:
            return ' function: %s unique: %s' % (self.getArgument(0),
                                                 self.getArgument(1))
        if self.ptype in [constants.CAN_DO,
                          constants.CANT_DO,
                          constants.CAN_DO_TIMEOUT]:
            return ' function: %s' % (self.getArgument(0),)
        if self.ptype == constants.SET_CLIENT_ID:
            return ' id: %s' % (self.getArgument(0),)
        if self.ptype in [constants.OPTION_REQ,
                          constants.OPTION_RES]:
            return ' option: %s' % (self.getArgument(0),)
        if self.ptype == constants.ERROR:
            return ' code: %s message: %s' % (self.getArgument(0),
                                              self.getArgument(1))
        return ''

    def toBinary(self):
        """Return the packet in wire format as a bytearray."""
        b = struct.pack('!4sii', self.code, self.ptype, len(self.data))
        b = bytearray(b)
        b += self.data
        return b

    def getArgument(self, index, last=False):
        """Return the *index*-th NUL-separated payload field.

        :arg bool last: When True, return the field joined with all
            following fields (for payloads whose final field may itself
            contain NUL bytes).
        """
        parts = self.data.split(b'\x00')
        if not last:
            return parts[index]
        return b'\x00'.join(parts[index:])

    def getJob(self):
        """Return the job this packet's handle refers to on its connection.

        :raises UnknownJobError: when the handle is not tracked.
        """
        handle = self.getArgument(0)
        job = self.connection.related_jobs.get(handle)
        if not job:
            raise UnknownJobError()
        return job
class BaseClientServer(object):
    """Common connection management for Gearman clients and servers.

    Owns two daemon threads: a connect loop that (re-)establishes the
    connections in ``inactive_connections``, and a poll loop that reads
    from ``active_connections`` and dispatches to :py:meth:`handlePacket`.
    A pipe (``wake_read``/``wake_write``) interrupts the poll whenever
    the connection set changes.
    """

    def __init__(self, client_id=None):
        if client_id:
            self.client_id = convert_to_bytes(client_id)
            self.log = logging.getLogger("gear.BaseClientServer.%s" %
                                         (self.client_id,))
        else:
            self.client_id = None
            self.log = logging.getLogger("gear.BaseClientServer")
        self.running = True
        self.active_connections = []
        self.inactive_connections = []
        # Round-robin index used by subclasses when picking a connection.
        self.connection_index = -1
        # Protects both connection lists and signals membership changes.
        self.connections_condition = threading.Condition()
        # Pipe used to wake the poll loop when connections change.
        self.wake_read, self.wake_write = os.pipe()
        self.poll_thread = threading.Thread(name="Gearman client poll",
                                            target=self._doPollLoop)
        self.poll_thread.daemon = True
        self.poll_thread.start()
        self.connect_thread = threading.Thread(name="Gearman client connect",
                                               target=self._doConnectLoop)
        self.connect_thread.daemon = True
        self.connect_thread.start()

    def _doConnectLoop(self):
        # Outer loop of the reconnect thread: sleep until there is an
        # inactive connection, then try to bring it up.
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.inactive_connections:
                self.log.debug("Waiting for change in available servers "
                               "to reconnect")
                self.connections_condition.wait()
            self.connections_condition.release()
            self.log.debug("Checking if servers need to be reconnected")
            try:
                if self.running and not self._connectLoop():
                    # Nothing connected; back off briefly before retrying.
                    time.sleep(2)
            except Exception:
                self.log.exception("Exception in connect loop:")

    def _connectLoop(self):
        """Try to connect each inactive connection once.

        :returns: True when at least one connection succeeded.
        """
        success = False
        for conn in self.inactive_connections[:]:
            self.log.debug("Trying to reconnect %s" % conn)
            try:
                conn.reconnect()
            except ConnectionError:
                self.log.debug("Unable to connect to %s" % conn)
                continue
            except Exception:
                self.log.exception("Exception while connecting to %s" % conn)
                continue
            try:
                self._onConnect(conn)
            except Exception:
                self.log.exception("Exception while performing on-connect "
                                   "tasks for %s" % conn)
                continue
            self.connections_condition.acquire()
            self.inactive_connections.remove(conn)
            self.active_connections.append(conn)
            self.connections_condition.notifyAll()
            # Wake the poll loop so it starts watching the new socket.
            os.write(self.wake_write, b'1\n')
            self.connections_condition.release()
            try:
                self._onActiveConnection(conn)
            except Exception:
                self.log.exception("Exception while performing active conn "
                                   "tasks for %s" % conn)
            success = True
        return success

    def _onConnect(self, conn):
        # Hook: called after a connection is established, before it is
        # made active.  Subclasses override.
        pass

    def _onActiveConnection(self, conn):
        # Hook: called after a connection has been added to the active
        # list.  Subclasses override.
        pass

    def _lostConnection(self, conn):
        """Move a faulty connection to the inactive list.

        Jobs related to the connection get handleDisconnect called
        (defined by subclasses — not in this class).
        """
        self.log.debug("Marking %s as disconnected" % conn)
        self.connections_condition.acquire()
        try:
            jobs = list(conn.related_jobs.values())
            if conn in self.active_connections:
                self.active_connections.remove(conn)
            if conn not in self.inactive_connections:
                self.inactive_connections.append(conn)
        finally:
            self.connections_condition.notifyAll()
            self.connections_condition.release()
        for job in jobs:
            self.handleDisconnect(job)

    def _doPollLoop(self):
        # Outer loop of the poll thread: wait for an active connection,
        # then run the poll until membership changes.
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.active_connections:
                self.log.debug("Waiting for change in available connections "
                               "to poll")
                self.connections_condition.wait()
            self.connections_condition.release()
            try:
                self._pollLoop()
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    # Peer reset during poll; restart the loop.
                    self.log.debug("Connection reset by peer")
            except Exception:
                self.log.exception("Exception in poll loop:")

    def _pollLoop(self):
        # Register all active sockets plus the wake pipe, then dispatch
        # readable events until woken or a connection fails.
        self.log.debug("Preparing to poll")
        poll = select.poll()
        bitmask = (select.POLLIN | select.POLLERR |
                   select.POLLHUP | select.POLLNVAL)
        conn_dict = {}
        for conn in self.active_connections:
            poll.register(conn.conn.fileno(), bitmask)
            conn_dict[conn.conn.fileno()] = conn
        poll.register(self.wake_read, bitmask)
        while self.running:
            self.log.debug("Polling %s connections" %
                           len(self.active_connections))
            ret = poll.poll()
            for fd, event in ret:
                if fd == self.wake_read:
                    self.log.debug("Woken by pipe")
                    # Drain the pipe, then return so the outer loop
                    # re-registers the (changed) connection set.
                    while True:
                        if os.read(self.wake_read, 1) == b'\n':
                            break
                    return
                conn = conn_dict[fd]
                if event & select.POLLIN:
                    # Keep reading until the connection's input buffer
                    # is drained, so no buffered packet is left waiting.
                    while True:
                        self.log.debug("Processing input on %s" % conn)
                        p = conn.readPacket()
                        if p:
                            if isinstance(p, Packet):
                                self.handlePacket(p)
                            else:
                                self.handleAdminRequest(p)
                        else:
                            # readPacket returns None on EOF.
                            self.log.debug("Received no data on %s" % conn)
                            self._lostConnection(conn)
                            return
                        if not conn.hasPendingData():
                            break
                else:
                    self.log.debug("Received error event on %s" % conn)
                    self._lostConnection(conn)
                    return

    def handlePacket(self, packet):
        """Dispatch a received packet to the matching handleX method.

        Processing time per packet type is reported via
        :py:meth:`reportTimingStats`.

        :arg Packet packet: The packet to dispatch.
        """
        self.log.info("Received packet from %s: %s" % (packet.connection,
                                                       packet))
        start = time.time()
        if packet.ptype == constants.JOB_CREATED:
            self.handleJobCreated(packet)
        elif packet.ptype == constants.WORK_COMPLETE:
            self.handleWorkComplete(packet)
        elif packet.ptype == constants.WORK_FAIL:
            self.handleWorkFail(packet)
        elif packet.ptype == constants.WORK_EXCEPTION:
            self.handleWorkException(packet)
        elif packet.ptype == constants.WORK_DATA:
            self.handleWorkData(packet)
        elif packet.ptype == constants.WORK_WARNING:
            self.handleWorkWarning(packet)
        elif packet.ptype == constants.WORK_STATUS:
            self.handleWorkStatus(packet)
        elif packet.ptype == constants.STATUS_RES:
            self.handleStatusRes(packet)
        elif packet.ptype == constants.GET_STATUS:
            self.handleGetStatus(packet)
        elif packet.ptype == constants.JOB_ASSIGN_UNIQ:
            self.handleJobAssignUnique(packet)
        elif packet.ptype == constants.JOB_ASSIGN:
            self.handleJobAssign(packet)
        elif packet.ptype == constants.NO_JOB:
            self.handleNoJob(packet)
        elif packet.ptype == constants.NOOP:
            self.handleNoop(packet)
        elif packet.ptype == constants.SUBMIT_JOB:
            self.handleSubmitJob(packet)
        elif packet.ptype == constants.SUBMIT_JOB_BG:
            self.handleSubmitJobBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_HIGH:
            self.handleSubmitJobHigh(packet)
        elif packet.ptype == constants.SUBMIT_JOB_HIGH_BG:
            self.handleSubmitJobHighBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_LOW:
            self.handleSubmitJobLow(packet)
        elif packet.ptype == constants.SUBMIT_JOB_LOW_BG:
            self.handleSubmitJobLowBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_SCHED:
            self.handleSubmitJobSched(packet)
        elif packet.ptype == constants.SUBMIT_JOB_EPOCH:
            self.handleSubmitJobEpoch(packet)
        elif packet.ptype == constants.GRAB_JOB_UNIQ:
            self.handleGrabJobUniq(packet)
        elif packet.ptype == constants.GRAB_JOB:
            self.handleGrabJob(packet)
        elif packet.ptype == constants.PRE_SLEEP:
            self.handlePreSleep(packet)
        elif packet.ptype == constants.SET_CLIENT_ID:
            self.handleSetClientID(packet)
        elif packet.ptype == constants.CAN_DO:
            self.handleCanDo(packet)
        elif packet.ptype == constants.CAN_DO_TIMEOUT:
            self.handleCanDoTimeout(packet)
        elif packet.ptype == constants.CANT_DO:
            self.handleCantDo(packet)
        elif packet.ptype == constants.RESET_ABILITIES:
            self.handleResetAbilities(packet)
        elif packet.ptype == constants.ECHO_REQ:
            self.handleEchoReq(packet)
        elif packet.ptype == constants.ECHO_RES:
            self.handleEchoRes(packet)
        elif packet.ptype == constants.ERROR:
            self.handleError(packet)
        elif packet.ptype == constants.ALL_YOURS:
            self.handleAllYours(packet)
        elif packet.ptype == constants.OPTION_REQ:
            self.handleOptionReq(packet)
        elif packet.ptype == constants.OPTION_RES:
            self.handleOptionRes(packet)
        else:
            self.log.error("Received unknown packet: %s" % packet)
        end = time.time()
        self.reportTimingStats(packet.ptype, end - start)

    def reportTimingStats(self, ptype, duration):
        # Hook: overridden by subclasses that report to statsd.
        pass

    def _defaultPacketHandler(self, packet):
        # Fallback for packet types the subclass does not implement.
        self.log.error("Received unhandled packet: %s" % packet)

    # The following handleX methods are default (unhandled) stubs;
    # subclasses override the ones relevant to their role.
    def handleJobCreated(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkComplete(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkFail(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkException(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkData(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkWarning(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkStatus(self, packet):
        return self._defaultPacketHandler(packet)

    def handleStatusRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGetStatus(self, packet):
        return self._defaultPacketHandler(packet)

    def handleJobAssignUnique(self, packet):
        return self._defaultPacketHandler(packet)

    def handleJobAssign(self, packet):
        return self._defaultPacketHandler(packet)

    def handleNoJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handleNoop(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobHigh(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobHighBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobLow(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobLowBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobSched(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobEpoch(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGrabJobUniq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGrabJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handlePreSleep(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSetClientID(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCanDo(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCanDoTimeout(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCantDo(self, packet):
        return self._defaultPacketHandler(packet)

    def handleResetAbilities(self, packet):
        return self._defaultPacketHandler(packet)

    def handleEchoReq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleEchoRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleError(self, packet):
        return self._defaultPacketHandler(packet)

    def handleAllYours(self, packet):
        return self._defaultPacketHandler(packet)

    def handleOptionReq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleOptionRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleAdminRequest(self, request):
        """Complete a finished admin request (response already attached)."""
        self.log.info("Received admin data %s" % request)
        request.setComplete()

    def shutdown(self):
        """Stop the worker threads and disconnect; idempotent."""
        if self.running:
            self.log.debug("Beginning shutdown")
            self._shutdown()
            self.log.debug("Beginning cleanup")
            self._cleanup()
            self.log.debug("Finished shutdown")
        else:
            self.log.warning("Shutdown called when not currently running. "
                             "Ignoring.")

    def _shutdown(self):
        # The first part of the shutdown process where all threads
        # are told to exit.
        self.running = False
        self.connections_condition.acquire()
        try:
            self.connections_condition.notifyAll()
            os.write(self.wake_write, b'1\n')
        finally:
            self.connections_condition.release()

    def _cleanup(self):
        # The second part of the shutdown process where we wait for all
        # threads to exit and then clean up.
        self.poll_thread.join()
        self.connect_thread.join()
        for connection in self.active_connections:
            connection.disconnect()
        self.active_connections = []
        self.inactive_connections = []
        os.close(self.wake_read)
        os.close(self.wake_write)
class BaseClient(BaseClientServer):
    """Common behavior for Gearman clients and workers.

    Adds server registration, round-robin connection selection, and
    packet send helpers on top of BaseClientServer.
    """

    def __init__(self, client_id='unknown'):
        super(BaseClient, self).__init__(client_id)
        self.log = logging.getLogger("gear.BaseClient.%s" % (self.client_id,))
        # A lock to use when sending packets that set the state across
        # all known connections.  An RLock so a holder may re-enter while
        # broadcasting to individual connections.
        self.broadcast_lock = threading.RLock()

    def addServer(self, host, port=4730,
                  ssl_key=None, ssl_cert=None, ssl_ca=None,
                  keepalive=False, tcp_keepidle=7200, tcp_keepintvl=75,
                  tcp_keepcnt=9):
        """Register a Gearman server; the connect thread will dial it.

        :raises ConfigurationError: when the host/port pair is already
            registered.
        """
        self.log.debug("Adding server %s port %s" % (host, port))
        self.connections_condition.acquire()
        try:
            for conn in self.active_connections + self.inactive_connections:
                if conn.host == host and conn.port == port:
                    raise ConfigurationError("Host/port already specified")
            conn = Connection(host, port, ssl_key, ssl_cert, ssl_ca,
                              self.client_id, keepalive, tcp_keepidle,
                              tcp_keepintvl, tcp_keepcnt)
            self.inactive_connections.append(conn)
            # Wake the connect loop so it picks up the new server.
            self.connections_condition.notifyAll()
        finally:
            self.connections_condition.release()

    def _checkTimeout(self, start_time, timeout):
        # Raise TimeoutError once *timeout* seconds have elapsed.
        if time.time() - start_time > timeout:
            raise TimeoutError()

    def waitForServer(self, timeout=None):
        """Block until at least one server connection is active.

        :arg numeric timeout: Seconds to wait before raising
            TimeoutError; None waits forever.
        """
        # NOTE(review): _checkTimeout raises while connections_condition
        # is held, so the lock is not released on the timeout path —
        # confirm whether callers rely on this.
        connected = False
        start_time = time.time()
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.active_connections:
                if timeout is not None:
                    self._checkTimeout(start_time, timeout)
                self.log.debug("Waiting for at least one active connection")
                # Wake periodically so the timeout is checked.
                self.connections_condition.wait(timeout=1)
            if self.active_connections:
                self.log.debug("Active connection found")
                connected = True
            self.connections_condition.release()
            if connected:
                return

    def getConnection(self):
        """Return the next active connection, round-robin.

        :raises NoConnectedServersError: when nothing is connected.
        """
        conn = None
        try:
            self.connections_condition.acquire()
            if not self.active_connections:
                raise NoConnectedServersError("No connected Gearman servers")
            self.connection_index += 1
            if self.connection_index >= len(self.active_connections):
                self.connection_index = 0
            conn = self.active_connections[self.connection_index]
        finally:
            self.connections_condition.release()
        return conn

    def broadcast(self, packet):
        """Send *packet* to every active connection, best-effort."""
        connections = self.active_connections[:]
        for connection in connections:
            try:
                self.sendPacket(packet, connection)
            except Exception:
                # Error handling is all done by sendPacket.
                pass

    def sendPacket(self, packet, connection):
        """Send *packet* on *connection*, marking it lost on failure.

        Re-raises the underlying exception after cleanup.
        """
        try:
            connection.sendPacket(packet)
            return
        except Exception:
            self.log.exception("Exception while sending packet %s to %s" %
                               (packet, connection))
            self._lostConnection(connection)
            raise

    def handleEchoRes(self, packet):
        """Route an ECHO_RES payload to the waiting echo() caller."""
        packet.connection.handleEchoRes(packet.getArgument(0, True))

    def handleError(self, packet):
        """Log a server ERROR and fail the oldest pending task.

        The connection is marked lost because the pending-task ordering
        can no longer be trusted after an error.
        """
        self.log.error("Received ERROR packet: %s: %s" %
                       (packet.getArgument(0),
                        packet.getArgument(1)))
        try:
            task = packet.connection.pending_tasks.pop(0)
            task.setComplete()
        except Exception:
            self.log.exception("Exception while handling error packet:")
            self._lostConnection(packet.connection)
class Client(BaseClient):
    def __init__(self, client_id='unknown'):
        """
        :arg str client_id: A name for this client, used in logging.
        """
        super(Client, self).__init__(client_id)
        self.log = logging.getLogger("gear.Client.%s" % (self.client_id,))
        # Options requested so far; re-applied on every (re-)connect.
        self.options = set()
def __repr__(self):
return '<gear.Client 0x%x>' % id(self)
def _onConnect(self, conn):
# Called immediately after a successful (re-)connection
self.broadcast_lock.acquire()
try:
super(Client, self)._onConnect(conn)
for name in self.options:
self._setOptionConnection(name, conn)
finally:
self.broadcast_lock.release()
def _setOptionConnection(self, name, conn):
# Set an option on a connection
packet = Packet(constants.REQ, constants.OPTION_REQ, name)
task = OptionReqTask()
try:
conn.pending_tasks.append(task)
self.sendPacket(packet, conn)
except Exception:
# Error handling is all done by sendPacket
task = None
return task
    def setOption(self, name, timeout=30):
        """Set a connection option on all currently connected servers.

        The option is remembered and re-applied to servers that connect
        later (see _onConnect).

        :arg name: The option name (str or bytes).
        :arg numeric timeout: Seconds to wait per connection for the
            OPTION_RES before treating that connection as lost.
        :returns: True when every surviving connection acknowledged the
            option; False otherwise.
        """
        tasks = {}
        name = convert_to_bytes(name)
        self.broadcast_lock.acquire()
        try:
            self.options.add(name)
            connections = self.active_connections[:]
            for connection in connections:
                task = self._setOptionConnection(name, connection)
                if task:
                    tasks[task] = connection
        finally:
            self.broadcast_lock.release()
        success = True
        for task in tasks.keys():
            complete = task.wait(timeout)
            conn = tasks[task]
            if not complete:
                self.log.error("Connection %s timed out waiting for a "
                               "response to an option request: %s" %
                               (conn, name))
                # A timed-out connection is dropped rather than counted
                # as a failure.
                self._lostConnection(conn)
                continue
            if name not in conn.options:
                success = False
        return success
def submitJob(self, job, background=False, precedence=PRECEDENCE_NORMAL,
timeout=30):
if job.unique is None:
unique = b''
else:
unique = job.binary_unique
data = b'\x00'.join((job.binary_name, unique, job.binary_arguments))
if background:
if precedence == PRECEDENCE_NORMAL:
cmd = constants.SUBMIT_JOB_BG
elif precedence == PRECEDENCE_LOW:
cmd = constants.SUBMIT_JOB_LOW_BG
elif precedence == PRECEDENCE_HIGH:
cmd = constants.SUBMIT_JOB_HIGH_BG
else:
raise ConfigurationError("Invalid precedence value")
else:
if precedence == PRECEDENCE_NORMAL:
cmd = constants.SUBMIT_JOB
elif precedence == PRECEDENCE_LOW:
cmd = constants.SUBMIT_JOB_LOW
elif precedence == PRECEDENCE_HIGH:
cmd = constants.SUBMIT_JOB_HIGH
else:
raise ConfigurationError("Invalid precedence value")
packet = Packet(constants.REQ, cmd, data)
attempted_connections = set()
while True:
if attempted_connections == set(self.active_connections):
break
conn = self.getConnection()
task = SubmitJobTask(job)
conn.pending_tasks.append(task)
attempted_connections.add(conn)
try:
self.sendPacket(packet, conn)
except Exception:
# Error handling is all done by sendPacket
continue
complete = task.wait(timeout)
if not complete:
self.log.error("Connection %s timed out waiting for a "
"response to a submit job request: %s" %
(conn, job))
self._lostConnection(conn)
continue
if not job.handle:
self.log.error("Connection %s sent an error in "
"response to a submit job request: %s" %
(conn, job))
continue
job.connection = conn
return
raise GearmanError("Unable to submit job to any connected servers")
def handleJobCreated(self, packet):
task = packet.connection.pending_tasks.pop(0)
if not isinstance(task, SubmitJobTask):
msg = ("Unexpected response received to submit job "
"request: %s" % packet)
self.log.error(msg)
self._lostConnection(packet.connection)
raise GearmanError(msg)
job = task.job
job.handle = packet.data
packet.connection.related_jobs[job.handle] = job
task.setComplete()
self.log.debug("Job created; %s" % job)
return job
def handleWorkComplete(self, packet):
job = packet.getJob()
data = packet.getArgument(1, True)
if data:
job.data.append(data)
job.complete = True
job.failure = False
del packet.connection.related_jobs[job.handle]
self.log.debug("Job complete; %s data: %s" %
(job, job.data))
return job
def handleWorkFail(self, packet):
job = packet.getJob()
job.complete = True
job.failure = True
del packet.connection.related_jobs[job.handle]
self.log.debug("Job failed; %s" % job)
return job
def handleWorkException(self, packet):
job = packet.getJob()
job.exception = packet.getArgument(1, True)
job.complete = True
job.failure = True
del packet.connection.related_jobs[job.handle]
self.log.debug("Job exception; %s exception: %s" %
(job, job.exception))
return job
def handleWorkData(self, packet):
job = packet.getJob()
data = packet.getArgument(1, True)
if data:
job.data.append(data)
self.log.debug("Job data; job: %s data: %s" %
(job, job.data))
return job
def handleWorkWarning(self, packet):
job = packet.getJob()
data = packet.getArgument(1, True)
if data:
job.data.append(data)
job.warning = True
self.log.debug("Job warning; %s data: %s" %
(job, job.data))
return job
def handleWorkStatus(self, packet):
job = packet.getJob()
job.numerator = packet.getArgument(1)
job.denominator = packet.getArgument(2)
try:
job.fraction_complete = (float(job.numerator) /
float(job.denominator))
except Exception:
job.fraction_complete = None
self.log.debug("Job status; %s complete: %s/%s" %
(job, job.numerator, job.denominator))
return job
def handleStatusRes(self, packet):
job = packet.getJob()
job.known = (packet.getArgument(1) == b'1')
job.running = (packet.getArgument(2) == b'1')
job.numerator = packet.getArgument(3)
job.denominator = packet.getArgument(4)
try:
job.fraction_complete = (float(job.numerator) /
float(job.denominator))
except Exception:
job.fraction_complete = None
return job
def handleOptionRes(self, packet):
task = packet.connection.pending_tasks.pop(0)
if not isinstance(task, OptionReqTask):
msg = ("Unexpected response received to option "
"request: %s" % packet)
self.log.error(msg)
self._lostConnection(packet.connection)
raise GearmanError(msg)
packet.connection.handleOptionRes(packet.getArgument(0))
task.setComplete()
def handleDisconnect(self, job):
return job
class FunctionRecord(object):
    """Record of a function a worker can perform, plus its optional
    execution timeout."""

    def __init__(self, name, timeout=None):
        self.name = name
        self.timeout = timeout

    def __repr__(self):
        template = '<gear.FunctionRecord 0x%x name: %s timeout: %s>'
        return template % (id(self), self.name, self.timeout)
class BaseJob(object):
    """Base class for Gearman jobs.

    The function name, argument blob and unique ID are stored as bytes
    internally; the ``name`` property decodes to text on access while
    the ``binary_*`` properties expose the raw values.
    """

    def __init__(self, name, arguments, unique=None, handle=None):
        self._name = convert_to_bytes(name)
        self._validate_arguments(arguments)
        self._arguments = convert_to_bytes(arguments)
        self._unique = convert_to_bytes(unique)
        self.handle = handle
        self.connection = None

    def _validate_arguments(self, arguments):
        # Subclasses (e.g. text jobs) may relax this check.
        if isinstance(arguments, (bytes, bytearray)):
            return
        raise TypeError("arguments must be of type bytes or bytearray")

    @property
    def arguments(self):
        return self._arguments

    @arguments.setter
    def arguments(self, value):
        self._arguments = value

    @property
    def unique(self):
        return self._unique

    @unique.setter
    def unique(self, value):
        self._unique = value

    @property
    def name(self):
        """The function name, decoded to text when stored as bytes."""
        stored = self._name
        if isinstance(stored, six.binary_type):
            return stored.decode('utf-8')
        return stored

    @name.setter
    def name(self, value):
        if isinstance(value, six.text_type):
            value = value.encode('utf-8')
        self._name = value

    @property
    def binary_name(self):
        return self._name

    @property
    def binary_arguments(self):
        return self._arguments

    @property
    def binary_unique(self):
        return self._unique

    def __repr__(self):
        return '<gear.Job 0x%x handle: %s name: %s unique: %s>' % (
            id(self), self.handle, self.name, self.unique)
class WorkerJob(BaseJob):
    """A job received by a worker, with helpers that report progress
    and results back to the server it came from."""

    def __init__(self, handle, name, arguments, unique=None):
        super(WorkerJob, self).__init__(name, arguments, unique, handle)

    def _sendHandlePayload(self, packet_type, payload):
        # Every WORK_* request except WORK_FAIL carries the job handle,
        # a NUL separator, then the payload.
        packet = Packet(constants.REQ, packet_type,
                        self.handle + b'\x00' + payload)
        self.connection.sendPacket(packet)

    def sendWorkData(self, data=b''):
        """Send a WORK_DATA packet with intermediate data."""
        self._sendHandlePayload(constants.WORK_DATA, data)

    def sendWorkWarning(self, data=b''):
        """Send a WORK_WARNING packet."""
        self._sendHandlePayload(constants.WORK_WARNING, data)

    def sendWorkStatus(self, numerator, denominator):
        """Send a WORK_STATUS packet reporting numerator/denominator
        progress."""
        self._sendHandlePayload(constants.WORK_STATUS,
                                str(numerator).encode('utf8') + b'\x00' +
                                str(denominator).encode('utf8'))

    def sendWorkComplete(self, data=b''):
        """Send a WORK_COMPLETE packet with any final data."""
        self._sendHandlePayload(constants.WORK_COMPLETE, data)

    def sendWorkFail(self):
        """Send a WORK_FAIL packet (handle only, no payload)."""
        p = Packet(constants.REQ, constants.WORK_FAIL, self.handle)
        self.connection.sendPacket(p)

    def sendWorkException(self, data=b''):
        """Send a WORK_EXCEPTION packet with exception details."""
        self._sendHandlePayload(constants.WORK_EXCEPTION, data)
class Worker(BaseClient):
    """A Gearman worker.

    Registers its functions (CAN_DO / CAN_DO_TIMEOUT) with every server
    it connects to, grabs work with GRAB_JOB_UNIQ, and hands received
    jobs to threads blocked in :py:meth:`getJob`.
    """
    job_class = WorkerJob

    def __init__(self, client_id=None, worker_id=None):
        """:arg str client_id: The client ID to provide to the servers.
            The deprecated ``worker_id`` argument is accepted as an
            alias for it.
        """
        # Require at least one of the two identifiers.  The previous
        # condition ("not client_id or worker_id") raised whenever a
        # worker_id was supplied, which also made the aliasing branch
        # below unreachable.
        if not client_id and not worker_id:
            raise Exception("A client_id must be provided")
        if worker_id:
            client_id = worker_id
        super(Worker, self).__init__(client_id)
        self.log = logging.getLogger("gear.Worker.%s" % (self.client_id,))
        self.worker_id = client_id
        self.functions = {}
        # Guards waiting_for_jobs, job_queue and the per-connection
        # GRAB_WAIT/SLEEP/IDLE state machines.
        self.job_lock = threading.Lock()
        self.waiting_for_jobs = 0
        self.job_queue = queue_mod.Queue()

    def __repr__(self):
        return '<gear.Worker 0x%x>' % id(self)

    def registerFunction(self, name, timeout=None):
        """Register a function with all connected servers and wake any
        sleeping connections so work can be grabbed for it.

        :arg name: The function name (converted to bytes).
        :arg timeout: Optional timeout sent via CAN_DO_TIMEOUT.
        """
        name = convert_to_bytes(name)
        self.functions[name] = FunctionRecord(name, timeout)
        if timeout:
            self._sendCanDoTimeout(name, timeout)
        else:
            self._sendCanDo(name)
        connections = self.active_connections[:]
        for connection in connections:
            if connection.state == "SLEEP":
                connection.changeState("IDLE")
        self._updateStateMachines()

    def unRegisterFunction(self, name):
        """Remove a function registration from all connected servers."""
        name = convert_to_bytes(name)
        del self.functions[name]
        self._sendCantDo(name)

    def setFunctions(self, functions):
        """Replace all registrations with the supplied FunctionRecords
        (RESET_ABILITIES followed by fresh CAN_DO packets).

        :raises InvalidDataError: If an element is not a FunctionRecord.
        """
        self._sendResetAbilities()
        self.functions = {}
        for f in functions:
            if not isinstance(f, FunctionRecord):
                raise InvalidDataError(
                    "An iterable of FunctionRecords is required.")
            self.functions[f.name] = f
        for f in self.functions.values():
            if f.timeout:
                self._sendCanDoTimeout(f.name, f.timeout)
            else:
                self._sendCanDo(f.name)

    def _sendCanDo(self, name):
        # Broadcast CAN_DO under the broadcast lock.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CAN_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCanDoTimeout(self, name, timeout):
        # Broadcast CAN_DO_TIMEOUT.  The timeout is concatenated
        # verbatim, so it is expected to already be bytes -- TODO
        # confirm with callers.
        self.broadcast_lock.acquire()
        try:
            data = name + b'\x00' + timeout
            p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCantDo(self, name):
        # Broadcast CANT_DO for an unregistered function.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CANT_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendResetAbilities(self):
        # Broadcast RESET_ABILITIES, clearing registrations server-side.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.RESET_ABILITIES, b'')
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendPreSleep(self, connection):
        # Tell one server we are sleeping until it has work for us.
        p = Packet(constants.REQ, constants.PRE_SLEEP, b'')
        self.sendPacket(p, connection)

    def _sendGrabJobUniq(self, connection=None):
        # Ask one server (or broadcast to all) for a job.
        p = Packet(constants.REQ, constants.GRAB_JOB_UNIQ, b'')
        if connection:
            self.sendPacket(p, connection)
        else:
            self.broadcast(p)

    def _onConnect(self, conn):
        # Called immediately after a successful (re-)connection:
        # identify ourselves and re-register every known function on
        # this connection.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.SET_CLIENT_ID, self.client_id)
            conn.sendPacket(p)
            super(Worker, self)._onConnect(conn)
            for f in self.functions.values():
                if f.timeout:
                    data = f.name + b'\x00' + f.timeout
                    p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
                else:
                    p = Packet(constants.REQ, constants.CAN_DO, f.name)
                conn.sendPacket(p)
            conn.changeState("IDLE")
        finally:
            self.broadcast_lock.release()
        # Any exceptions will be handled by the calling function, and the
        # connection will not be put into the pool.

    def _onActiveConnection(self, conn):
        # A connection became usable; if threads are waiting for jobs,
        # start grabbing on it.
        self.job_lock.acquire()
        try:
            if self.waiting_for_jobs > 0:
                self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _updateStateMachines(self):
        # Reconcile each connection with the number of waiting threads:
        # grab jobs while threads are waiting, go idle otherwise.
        connections = self.active_connections[:]
        for connection in connections:
            if (connection.state == "IDLE" and self.waiting_for_jobs > 0):
                self._sendGrabJobUniq(connection)
                connection.changeState("GRAB_WAIT")
            if (connection.state != "IDLE" and self.waiting_for_jobs < 1):
                connection.changeState("IDLE")

    def getJob(self):
        """Block until a job is available and return it.

        :returns: An instance of :py:attr:`job_class`.
        :raises InterruptedError: If the worker shuts down or
            :py:meth:`stopWaitingForJobs` is called while waiting.
        """
        self.job_lock.acquire()
        try:
            # self.running gets cleared during _shutdown(), before the
            # stopWaitingForJobs() is called.  This check has to happen
            # with the job_lock held, otherwise there would be a window
            # for race conditions between manipulation of "running" and
            # "waiting_for_jobs".
            if not self.running:
                raise InterruptedError()
            self.waiting_for_jobs += 1
            self.log.debug("Get job; number of threads waiting for jobs: %s" %
                           self.waiting_for_jobs)
            try:
                job = self.job_queue.get(False)
            except queue_mod.Empty:
                job = None
            if not job:
                self._updateStateMachines()
        finally:
            self.job_lock.release()
        if not job:
            job = self.job_queue.get()
        self.log.debug("Received job: %s" % job)
        # A None sentinel is queued by stopWaitingForJobs()/_shutdown().
        if job is None:
            raise InterruptedError()
        return job

    def stopWaitingForJobs(self):
        """Interrupt all threads currently blocked in getJob()."""
        self.job_lock.acquire()
        try:
            # Wait for outstanding GRAB_JOB_UNIQ requests to resolve so
            # a job cannot slip in after we queue the sentinels.
            while True:
                connections = self.active_connections[:]
                now = time.time()
                ok = True
                for connection in connections:
                    if connection.state == "GRAB_WAIT":
                        # Replies to GRAB_JOB should be fast; give up on
                        # the connection if we have waited too long.
                        if now - connection.state_time > 5:
                            self._lostConnection(connection)
                        else:
                            ok = False
                if ok:
                    break
                else:
                    # Release the lock briefly so replies can be handled.
                    self.job_lock.release()
                    time.sleep(0.1)
                    self.job_lock.acquire()
            # Wake each waiting thread with a None sentinel.
            while self.waiting_for_jobs > 0:
                self.waiting_for_jobs -= 1
                self.job_queue.put(None)
            self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _shutdown(self):
        self.job_lock.acquire()
        try:
            # Clear self.running while the job_lock is held so getJob()
            # cannot race with shutdown.
            super(Worker, self)._shutdown()
        finally:
            self.job_lock.release()
        self.stopWaitingForJobs()

    def handleNoop(self, packet):
        """Handle NOOP: a sleeping server has work; start grabbing."""
        self.job_lock.acquire()
        try:
            if packet.connection.state == "SLEEP":
                self.log.debug("Sending GRAB_JOB_UNIQ")
                self._sendGrabJobUniq(packet.connection)
                packet.connection.changeState("GRAB_WAIT")
            else:
                self.log.debug("Received unexpected NOOP packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleNoJob(self, packet):
        """Handle NO_JOB: the server has no work; go to sleep on it."""
        self.job_lock.acquire()
        try:
            if packet.connection.state == "GRAB_WAIT":
                self.log.debug("Sending PRE_SLEEP")
                self._sendPreSleep(packet.connection)
                packet.connection.changeState("SLEEP")
            else:
                self.log.debug("Received unexpected NO_JOB packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleJobAssign(self, packet):
        """Handle JOB_ASSIGN (no unique ID supplied)."""
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        arguments = packet.getArgument(2, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, None)

    def handleJobAssignUnique(self, packet):
        """Handle JOB_ASSIGN_UNIQ (includes the unique ID)."""
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        unique = packet.getArgument(2)
        if unique == b'':
            unique = None
        arguments = packet.getArgument(3, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, unique)

    def _handleJobAssignment(self, packet, handle, name, arguments, unique):
        # Build the job object and queue it for a waiting getJob()
        # caller, updating connection state under the job lock.
        job = self.job_class(handle, name, arguments, unique)
        job.connection = packet.connection
        self.job_lock.acquire()
        try:
            packet.connection.changeState("IDLE")
            self.waiting_for_jobs -= 1
            self.log.debug("Job assigned; number of threads waiting for "
                           "jobs: %s" % self.waiting_for_jobs)
            self.job_queue.put(job)
            self._updateStateMachines()
        finally:
            self.job_lock.release()
class Job(BaseJob):
    """A client-side job whose result state is filled in as WORK_* and
    STATUS_RES packets arrive."""
    data_type = list

    def __init__(self, name, arguments, unique=None):
        super(Job, self).__init__(name, arguments, unique)
        self._data = self.data_type()
        self._exception = None
        # Result/progress state, updated by the Client packet handlers.
        self.warning = self.complete = self.failure = False
        self.numerator = self.denominator = None
        self.fraction_complete = None
        self.known = self.running = None

    @property
    def binary_data(self):
        """Yield each data chunk as bytes, encoding text chunks UTF-8."""
        for chunk in self._data:
            if isinstance(chunk, six.text_type):
                chunk = chunk.encode('utf-8')
            yield chunk

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # The container type matters (TextJob uses a decoding list).
        if not isinstance(value, self.data_type):
            raise ValueError(
                "data attribute must be {}".format(self.data_type))
        self._data = value

    @property
    def exception(self):
        return self._exception

    @exception.setter
    def exception(self, value):
        self._exception = value
class TextJobArguments(object):
    """Mixin presenting job arguments as text, stored as UTF-8 bytes."""

    def _validate_arguments(self, arguments):
        # Any type is accepted; conversion happens in the setter.
        pass

    @property
    def arguments(self):
        args = self._arguments
        if isinstance(args, six.binary_type):
            args = args.decode('utf-8')
        return args

    @arguments.setter
    def arguments(self, value):
        if isinstance(value, six.binary_type):
            self._arguments = value
        else:
            self._arguments = value.encode('utf-8')
class TextJobUnique(object):
    """Mixin presenting the unique ID as text, stored as UTF-8 bytes."""

    @property
    def unique(self):
        stored = self._unique
        return (stored.decode('utf-8')
                if isinstance(stored, six.binary_type) else stored)

    @unique.setter
    def unique(self, value):
        if isinstance(value, six.binary_type):
            self._unique = value
        else:
            self._unique = value.encode('utf-8')
class TextList(list):
    """A list that decodes any bytes element added to it to text
    (UTF-8) via append(), extend() or insert()."""

    def append(self, x):
        if isinstance(x, six.binary_type):
            x = x.decode('utf-8')
        super(TextList, self).append(x)

    def extend(self, iterable):
        """Extend with *iterable*, decoding each bytes element."""
        def _iter():
            for value in iterable:
                if isinstance(value, six.binary_type):
                    yield value.decode('utf-8')
                else:
                    yield value
        # The generator function must be called: extending with the
        # function object itself raises TypeError (not iterable).
        super(TextList, self).extend(_iter())

    def insert(self, i, x):
        if isinstance(x, six.binary_type):
            x = x.decode('utf-8')
        super(TextList, self).insert(i, x)
class TextJob(TextJobArguments, TextJobUnique, Job):
    """A Job whose arguments, unique ID, data and exception are
    presented as text rather than bytes."""
    data_type = TextList

    @property
    def exception(self):
        exc = self._exception
        return (exc.decode('utf-8')
                if isinstance(exc, six.binary_type) else exc)

    @exception.setter
    def exception(self, value):
        # NOTE(review): a None value would raise AttributeError here --
        # confirm callers never assign None after construction.
        if isinstance(value, six.binary_type):
            self._exception = value
        else:
            self._exception = value.encode('utf-8')
class TextWorkerJob(TextJobArguments, TextJobUnique, WorkerJob):
    """A WorkerJob whose send methods accept text and encode it to
    UTF-8 before delegating to the binary implementations."""

    @staticmethod
    def _encode(data):
        if isinstance(data, six.text_type):
            data = data.encode('utf8')
        return data

    def sendWorkData(self, data=''):
        """Send a WORK_DATA packet; text data is UTF-8 encoded."""
        return super(TextWorkerJob, self).sendWorkData(self._encode(data))

    def sendWorkWarning(self, data=''):
        """Send a WORK_WARNING packet; text data is UTF-8 encoded."""
        return super(TextWorkerJob, self).sendWorkWarning(self._encode(data))

    def sendWorkComplete(self, data=''):
        """Send a WORK_COMPLETE packet; text data is UTF-8 encoded."""
        return super(TextWorkerJob, self).sendWorkComplete(self._encode(data))

    def sendWorkException(self, data=''):
        """Send a WORK_EXCEPTION packet; text data is UTF-8 encoded."""
        return super(TextWorkerJob, self).sendWorkException(self._encode(data))
class TextWorker(Worker):
    """A Worker whose jobs are TextWorkerJobs (text in, text out)."""
    job_class = TextWorkerJob
class BaseBinaryJob(object):
    """Mixin for jobs whose function name stays as raw bytes (unlike
    BaseJob.name, which decodes to text)."""
    @property
    def name(self):
        return self._name
class BinaryWorkerJob(BaseBinaryJob, WorkerJob):
    """A WorkerJob whose name is exposed as bytes."""
    pass
class BinaryJob(BaseBinaryJob, Job):
    """A Job whose name is exposed as bytes."""
    pass
# Below are classes for use in the server implementation:
class ServerJob(BinaryJob):
    """Server-side record of a submitted job, tracking both the client
    connection that submitted it and the worker connection (if any)
    currently executing it."""

    def __init__(self, handle, name, arguments, client_connection,
                 unique=None):
        super(ServerJob, self).__init__(name, arguments, unique)
        self.handle = handle
        self.worker_connection = None
        self.client_connection = client_connection
        # BaseJob's single 'connection' attribute is meaningless on the
        # server side, which tracks two connections instead.
        del self.connection
class ServerAdminRequest(AdminRequest):
    """An admin request received by the server on *connection*."""

    def __init__(self, connection):
        super(ServerAdminRequest, self).__init__()
        self.connection = connection

    def isComplete(self, data):
        """Scan *data* for a full newline-terminated command.

        :returns: ``(True, remainder)`` with the newline stripped when a
            command was found, else ``(False, None)``.
        """
        newline_pos = data.find(b'\n')
        if newline_pos == -1:
            return (False, None)
        self.command = data[:newline_pos]
        # Remove newline from data
        return (True, data[newline_pos + 1:])
class NonBlockingConnection(Connection):
    """A Connection operating in non-blocking mode.

    Writes are queued and flushed opportunistically; would-block
    conditions on either direction are signalled as RetryIOError so the
    caller can wait for the next poll event.
    """
    def __init__(self, host, port, ssl_key=None, ssl_cert=None,
                 ssl_ca=None, client_id='unknown'):
        super(NonBlockingConnection, self).__init__(
            host, port, ssl_key,
            ssl_cert, ssl_ca, client_id)
        # Outgoing byte strings not yet (fully) written to the socket.
        self.send_queue = []
    def connect(self):
        """Connect, then switch the socket to non-blocking mode."""
        super(NonBlockingConnection, self).connect()
        if self.connected and self.conn:
            self.conn.setblocking(0)
    def _readRawBytes(self, bytes_to_read):
        # Read up to bytes_to_read bytes, mapping both TLS want-read/
        # want-write and plain EAGAIN to RetryIOError.
        try:
            buff = self.conn.recv(bytes_to_read)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_WANT_READ:
                raise RetryIOError()
            elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                raise RetryIOError()
            raise
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                # Read operation would block, we're done until
                # poll reports this socket readable again.
                raise RetryIOError()
            raise
        return buff
    def sendPacket(self, packet):
        """Queue a packet's binary form and try to flush the queue."""
        self.log.debug("Queuing packet to %s: %s" % (self, packet))
        self.send_queue.append(packet.toBinary())
        self.sendQueuedData()
    def sendRaw(self, data):
        """Queue raw bytes and try to flush the queue."""
        self.log.debug("Queuing data to %s: %s" % (self, data))
        self.send_queue.append(data)
        self.sendQueuedData()
    def sendQueuedData(self):
        """Write as much queued data as the socket accepts; any
        partially written chunk is pushed back onto the queue head."""
        try:
            while len(self.send_queue):
                data = self.send_queue.pop(0)
                r = 0
                try:
                    r = self.conn.send(data)
                except ssl.SSLError as e:
                    if e.errno == ssl.SSL_ERROR_WANT_READ:
                        raise RetryIOError()
                    elif e.errno == ssl.SSL_ERROR_WANT_WRITE:
                        raise RetryIOError()
                    else:
                        raise
                except socket.error as e:
                    if e.errno == errno.EAGAIN:
                        self.log.debug("Write operation on %s would block"
                                       % self)
                        raise RetryIOError()
                    else:
                        raise
                finally:
                    # Keep whatever was not written (r bytes were sent;
                    # r stays 0 when the send raised).
                    data = data[r:]
                    if data:
                        self.send_queue.insert(0, data)
        except RetryIOError:
            # Flushed as much as possible; resume on the next event.
            pass
class ServerConnection(NonBlockingConnection):
    """A connection to a client or worker, as seen by the server.

    Note that Connection.__init__ is deliberately not called; all
    required attributes are initialized directly from the accepted
    socket.
    """
    def __init__(self, addr, conn, use_ssl, client_id):
        if client_id:
            self.log = logging.getLogger("gear.ServerConnection.%s" %
                                         (client_id,))
        else:
            self.log = logging.getLogger("gear.ServerConnection")
        self.send_queue = []
        self.admin_requests = []
        self.host = addr[0]
        self.port = addr[1]
        self.conn = conn
        self.conn.setblocking(0)
        self.input_buffer = b''
        self.need_bytes = False
        self.use_ssl = use_ssl
        # The remote peer's ID; populated later (the client_id argument
        # above only names our logger).
        self.client_id = None
        # Function names this connection registered via CAN_DO.
        self.functions = set()
        # Jobs this connection submitted or is executing, by handle.
        self.related_jobs = {}
        self.ssl_subject = None
        if self.use_ssl:
            # Record the peer certificate's commonName; used for ACL
            # checks on admin commands.
            for x in conn.getpeercert()['subject']:
                if x[0][0] == 'commonName':
                    self.ssl_subject = x[0][1]
            self.log.debug("SSL subject: %s" % self.ssl_subject)
        self.changeState("INIT")
    def _getAdminRequest(self):
        # The server constructs a request per received command rather
        # than matching against previously submitted requests.
        return ServerAdminRequest(self)
    def _putAdminRequest(self, req):
        # Nothing to recycle; requests are created per command.
        pass
    def __repr__(self):
        return '<gear.ServerConnection 0x%x name: %s host: %s port: %s>' % (
            id(self), self.client_id, self.host, self.port)
class Server(BaseClientServer):
    """A Gearman server: accepts client/worker connections, queues jobs
    at three precedence levels, and brokers them between the two."""
    # Edge-triggered epoll: each readiness transition is reported once,
    # so handlers must drain sockets until they would block.
    edge_bitmask = select.EPOLLET
    error_bitmask = (select.EPOLLERR | select.EPOLLHUP | edge_bitmask)
    read_bitmask = (select.EPOLLIN | error_bitmask)
    readwrite_bitmask = (select.EPOLLOUT | read_bitmask)
    def __init__(self, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None,
                 statsd_host=None, statsd_port=8125, statsd_prefix=None,
                 server_id=None, acl=None, host=None, keepalive=False,
                 tcp_keepidle=7200, tcp_keepintvl=75, tcp_keepcnt=9):
        """Open the listening socket and initialize server state.

        :arg int port: Port to listen on; 0 selects a free port.
        :arg ssl_key, ssl_cert, ssl_ca: All three must be supplied to
            enable TLS with required client certificates.
        :arg statsd_host: Optional statsd host for queue/packet stats.
        :arg acl: Optional ACL object consulted for admin commands.
        :arg bool keepalive: Enable TCP keepalive where supported.
        :raises Exception: If no listening socket could be opened.
        """
        self.port = port
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.ssl_ca = ssl_ca
        # One queue per job precedence level.
        self.high_queue = []
        self.normal_queue = []
        self.low_queue = []
        # All known jobs by handle, plus counters kept in sync by
        # _removeJob()/_updateStats().
        self.jobs = {}
        self.running_jobs = 0
        self.waiting_jobs = 0
        self.total_jobs = 0
        self.functions = set()
        self.max_handle = 0
        self.acl = acl
        # Pipe used to wake the accept loop on shutdown.
        self.connect_wake_read, self.connect_wake_write = os.pipe()
        self.poll = select.epoll()
        # Map of file descriptor -> ServerConnection for the poll loop.
        self.connection_map = {}
        self.use_ssl = False
        if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
            self.use_ssl = True
        addrs = socket.getaddrinfo(host, self.port, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM, 0,
                                   socket.AI_PASSIVE |
                                   socket.AI_ADDRCONFIG)
        # Sort address families in reverse numeric order (typically
        # trying IPv6 before IPv4 -- platform dependent).
        addrs.sort(key=lambda addr: addr[0], reverse=True)
        for res in addrs:
            af, socktype, proto, canonname, sa = res
            try:
                self.socket = socket.socket(af, socktype, proto)
                self.socket.setsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR, 1)
                if keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
                    self.socket.setsockopt(socket.SOL_SOCKET,
                                           socket.SO_KEEPALIVE, 1)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPIDLE, tcp_keepidle)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPINTVL, tcp_keepintvl)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPCNT, tcp_keepcnt)
                elif keepalive:
                    # NOTE(review): self.log is only assigned further
                    # below; verify BaseClientServer provides a default
                    # logger, otherwise this warning would raise
                    # AttributeError on platforms without TCP_KEEPIDLE.
                    self.log.warning('Keepalive requested but not available '
                                     'on this platform')
            except socket.error:
                self.socket = None
                continue
            try:
                self.socket.bind(sa)
                self.socket.listen(1)
            except socket.error:
                self.socket.close()
                self.socket = None
                continue
            break
        if self.socket is None:
            raise Exception("Could not open socket")
        if port == 0:
            # Report the ephemeral port actually bound.
            self.port = self.socket.getsockname()[1]
        super(Server, self).__init__(server_id)
        self.poll.register(self.wake_read, self.read_bitmask)
        if server_id:
            self.log = logging.getLogger("gear.Server.%s" % (self.client_id,))
        else:
            self.log = logging.getLogger("gear.Server")
        if statsd_host:
            if not statsd:
                self.log.error("Unable to import statsd module")
                self.statsd = None
            else:
                self.statsd = statsd.StatsClient(statsd_host,
                                                 statsd_port,
                                                 statsd_prefix)
        else:
            self.statsd = None
    def _doConnectLoop(self):
        """Run the accept loop until shutdown, logging unexpected
        errors and backing off a second before retrying."""
        while self.running:
            try:
                self.connectLoop()
            except Exception:
                self.log.exception("Exception in connect loop:")
                time.sleep(1)
    def connectLoop(self):
        """Accept incoming connections until woken via the connect pipe
        (shutdown) or self.running clears."""
        poll = select.poll()
        bitmask = (select.POLLIN | select.POLLERR |
                   select.POLLHUP | select.POLLNVAL)
        poll.register(self.connect_wake_read, bitmask)
        poll.register(self.socket.fileno(), bitmask)
        while self.running:
            ret = poll.poll()
            for fd, event in ret:
                if fd == self.connect_wake_read:
                    self.log.debug("Accept woken by pipe")
                    # Drain the pipe up to the newline, then return so
                    # the caller can re-evaluate self.running.
                    while True:
                        if os.read(self.connect_wake_read, 1) == b'\n':
                            break
                    return
                if event & select.POLLIN:
                    self.log.debug("Accepting new connection")
                    c, addr = self.socket.accept()
                    if self.use_ssl:
                        # Require and verify a client certificate.
                        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                        context.verify_mode = ssl.CERT_REQUIRED
                        context.load_cert_chain(self.ssl_cert, self.ssl_key)
                        context.load_verify_locations(self.ssl_ca)
                        c = context.wrap_socket(c, server_side=True)
                    conn = ServerConnection(addr, c, self.use_ssl,
                                            self.client_id)
                    self.log.info("Accepted connection %s" % (conn,))
                    self.connections_condition.acquire()
                    try:
                        self.active_connections.append(conn)
                        self._registerConnection(conn)
                        self.connections_condition.notifyAll()
                    finally:
                        self.connections_condition.release()
def readFromConnection(self, conn):
while True:
self.log.debug("Processing input on %s" % conn)
try:
p = conn.readPacket()
except RetryIOError:
# epoll flags this connection again
return
if p:
if isinstance(p, Packet):
self.handlePacket(p)
else:
self.handleAdminRequest(p)
else:
self.log.debug("Received no data on %s" % conn)
raise DisconnectError()
    def writeToConnection(self, conn):
        """Flush any queued outgoing data on *conn*."""
        self.log.debug("Processing output on %s" % conn)
        conn.sendQueuedData()
    def _processPollEvent(self, conn, event):
        """Process one poll event for *conn*, dropping the connection on
        any error rather than propagating it."""
        # This should do whatever is necessary to process a connection
        # that has triggered a poll event.  It should generally not
        # raise exceptions so as to avoid restarting the poll loop.
        # The exception handlers here can raise exceptions and if they
        # do, it's okay, the poll loop will be restarted.
        try:
            if event & (select.EPOLLERR | select.EPOLLHUP):
                self.log.debug("Received error event on %s: %s" % (
                    conn, event))
                raise DisconnectError()
            if event & (select.POLLIN | select.POLLOUT):
                # Drain reads first, then flush pending writes.
                self.readFromConnection(conn)
                self.writeToConnection(conn)
        except socket.error as e:
            if e.errno == errno.ECONNRESET:
                self.log.debug("Connection reset by peer: %s" % (conn,))
                self._lostConnection(conn)
                return
            raise
        except DisconnectError:
            self._lostConnection(conn)
            return
        except Exception:
            self.log.exception("Exception reading or writing "
                               "from %s:" % (conn,))
            self._lostConnection(conn)
            return
def _flushAllConnections(self):
connections = self.active_connections[:]
for conn in connections:
self._processPollEvent(conn, select.POLLIN | select.POLLOUT)
    def _doPollLoop(self):
        """Run the poll loop until shutdown, logging any exception and
        restarting it."""
        while self.running:
            try:
                self._pollLoop()
            except Exception:
                self.log.exception("Exception in poll loop:")
    def _pollLoop(self):
        """Dispatch epoll events to connections until shutdown."""
        self.log.debug("Preparing to poll")
        # Presumably catches edge-triggered events missed while the
        # loop was not polling -- confirm against BaseClientServer.
        self._flushAllConnections()
        while self.running:
            self.log.debug("Polling %s connections" %
                           len(self.active_connections))
            ret = self.poll.poll()
            # _processPollEvent does not propagate errors, ensuring
            # that every file descriptor in 'ret' is processed.
            for fd, event in ret:
                if fd == self.wake_read:
                    # This means we're exiting, so we can ignore the
                    # remaining events; drain the wake pipe and return.
                    self.log.debug("Woken by pipe")
                    while True:
                        if os.read(self.wake_read, 1) == b'\n':
                            break
                    return
                conn = self.connection_map[fd]
                self._processPollEvent(conn, event)
    def _shutdown(self):
        """Begin shutdown; also wakes the accept loop via its pipe."""
        super(Server, self)._shutdown()
        os.write(self.connect_wake_write, b'1\n')
    def _cleanup(self):
        """Release the listening socket and the accept wake pipe."""
        super(Server, self)._cleanup()
        self.socket.close()
        os.close(self.connect_wake_read)
        os.close(self.connect_wake_write)
def _registerConnection(self, conn):
self.log.debug("Registering %s" % conn)
self.connection_map[conn.conn.fileno()] = conn
self.poll.register(conn.conn.fileno(), self.readwrite_bitmask)
def _unregisterConnection(self, conn):
self.log.debug("Unregistering %s" % conn)
fd = conn.conn.fileno()
if fd not in self.connection_map:
return
try:
self.poll.unregister(fd)
except KeyError:
pass
try:
del self.connection_map[fd]
except KeyError:
pass
    def _lostConnection(self, conn):
        """Tear down *conn*: unregister it, fail any jobs a worker was
        running over it back to their clients, and close the socket."""
        self.log.info("Marking %s as disconnected" % conn)
        self.connections_condition.acquire()
        self._unregisterConnection(conn)
        try:
            # Snapshot related jobs before forgetting the connection.
            jobs = list(conn.related_jobs.values())
            if conn in self.active_connections:
                self.active_connections.remove(conn)
        finally:
            self.connections_condition.notifyAll()
            self.connections_condition.release()
        for job in jobs:
            if job.worker_connection == conn:
                # The worker went away; report WORK_FAIL to the client.
                try:
                    p = Packet(constants.REQ, constants.WORK_FAIL, job.handle)
                    if job.client_connection:
                        job.client_connection.sendPacket(p)
                except Exception:
                    self.log.exception("Sending WORK_FAIL to client after "
                                       "worker disconnect failed:")
            self._removeJob(job)
        try:
            conn.conn.shutdown(socket.SHUT_RDWR)
        except socket.error as e:
            # ENOTCONN just means the peer already disconnected.
            if e.errno != errno.ENOTCONN:
                self.log.exception("Unable to shutdown socket "
                                   "for connection %s" % (conn,))
        except Exception:
            self.log.exception("Unable to shutdown socket "
                               "for connection %s" % (conn,))
        try:
            conn.conn.close()
        except Exception:
            self.log.exception("Unable to close socket "
                               "for connection %s" % (conn,))
        self._updateStats()
    def _removeJob(self, job, dequeue=True):
        """Forget *job* entirely and update the job counters.

        :arg dequeue: True to search all three precedence queues, False
            to skip dequeueing, or the specific queue list the job is
            known to be in.
        """
        if job.client_connection:
            try:
                del job.client_connection.related_jobs[job.handle]
            except KeyError:
                pass
        if job.worker_connection:
            try:
                del job.worker_connection.related_jobs[job.handle]
            except KeyError:
                pass
        try:
            del self.jobs[job.handle]
        except KeyError:
            pass
        if dequeue is True:
            # The job may be in any of the queues; removal is a no-op
            # for the others.
            try:
                self.high_queue.remove(job)
            except ValueError:
                pass
            try:
                self.normal_queue.remove(job)
            except ValueError:
                pass
            try:
                self.low_queue.remove(job)
            except ValueError:
                pass
        elif dequeue is not False:
            # The caller identified the exact queue holding the job.
            dequeue.remove(job)
        self.total_jobs -= 1
        if job.running:
            self.running_jobs -= 1
        else:
            self.waiting_jobs -= 1
def getQueue(self):
ret = []
for queue in [self.high_queue, self.normal_queue, self.low_queue]:
ret += queue
return ret
def handleAdminRequest(self, request):
self.log.info("Received admin request %s" % (request,))
if request.command.startswith(b'cancel job'):
self.handleCancelJob(request)
elif request.command.startswith(b'status'):
self.handleStatus(request)
elif request.command.startswith(b'workers'):
self.handleWorkers(request)
elif request.command.startswith(b'acl list'):
self.handleACLList(request)
elif request.command.startswith(b'acl grant'):
self.handleACLGrant(request)
elif request.command.startswith(b'acl revoke'):
self.handleACLRevoke(request)
elif request.command.startswith(b'acl self-revoke'):
self.handleACLSelfRevoke(request)
self.log.debug("Finished handling admin request %s" % (request,))
    def _cancelJob(self, request, job, queue):
        """Remove a queued job for an admin 'cancel job' request,
        honoring the ACL (invoke permission) when one is configured."""
        if self.acl:
            if not self.acl.canInvoke(request.connection.ssl_subject,
                                      job.name):
                self.log.info("Rejecting cancel job from %s for %s "
                              "due to ACL" %
                              (request.connection.ssl_subject, job.name))
                request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
                return
        self._removeJob(job, dequeue=queue)
        self._updateStats()
        request.connection.sendRaw(b'OK\n')
        return
def handleCancelJob(self, request):
words = request.command.split()
handle = words[2]
if handle in self.jobs:
for queue in [self.high_queue, self.normal_queue, self.low_queue]:
for job in queue:
if handle == job.handle:
return self._cancelJob(request, job, queue)
request.connection.sendRaw(b'ERR UNKNOWN_JOB\n')
def handleACLList(self, request):
if self.acl is None:
request.connection.sendRaw(b'ERR ACL_DISABLED\n')
return
for entry in self.acl.getEntries():
l = "%s\tregister=%s\tinvoke=%s\tgrant=%s\n" % (
entry.subject, entry.register, entry.invoke, entry.grant)
request.connection.sendRaw(l.encode('utf8'))
request.connection.sendRaw(b'.\n')
    def handleACLGrant(self, request):
        """Handle 'acl grant <verb> <subject> [pattern]'.

        Requires grant permission for the requesting SSL subject.
        """
        # NOTE(review): request.command is bytes (see
        # ServerAdminRequest.isComplete), so under Python 3 the
        # comparisons of verb against 'invoke'/'register'/'grant' (str)
        # are always False and every grant would answer
        # UNKNOWN_ACL_VERB -- confirm whether commands are decoded
        # before reaching this handler.
        words = request.command.split(None, 4)
        verb = words[2]
        subject = words[3]
        if self.acl is None:
            request.connection.sendRaw(b'ERR ACL_DISABLED\n')
            return
        if not self.acl.canGrant(request.connection.ssl_subject):
            request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
            return
        try:
            if verb == 'invoke':
                self.acl.grantInvoke(subject, words[4])
            elif verb == 'register':
                self.acl.grantRegister(subject, words[4])
            elif verb == 'grant':
                self.acl.grantGrant(subject)
            else:
                request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
                return
        except ACLError as e:
            self.log.info("Error in grant command: %s" % (e.message,))
            request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
            return
        request.connection.sendRaw(b'OK\n')
    def handleACLRevoke(self, request):
        """Handle 'acl revoke <verb> <subject>'.

        Subjects may revoke their own entries; revoking another
        subject's entry requires grant permission.
        """
        # NOTE(review): as in handleACLGrant, verb is bytes under
        # Python 3 while the comparisons below use str -- verify.
        words = request.command.split()
        verb = words[2]
        subject = words[3]
        if self.acl is None:
            request.connection.sendRaw(b'ERR ACL_DISABLED\n')
            return
        if subject != request.connection.ssl_subject:
            if not self.acl.canGrant(request.connection.ssl_subject):
                request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
                return
        try:
            if verb == 'invoke':
                self.acl.revokeInvoke(subject)
            elif verb == 'register':
                self.acl.revokeRegister(subject)
            elif verb == 'grant':
                self.acl.revokeGrant(subject)
            elif verb == 'all':
                try:
                    self.acl.remove(subject)
                except ACLError:
                    # Nothing to remove; treat as success.
                    pass
            else:
                request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
                return
        except ACLError as e:
            self.log.info("Error in revoke command: %s" % (e.message,))
            request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
            return
        request.connection.sendRaw(b'OK\n')
    def handleACLSelfRevoke(self, request):
        """Handle 'acl self-revoke <verb>': revoke permissions for the
        requesting connection's own SSL subject (no grant permission
        needed)."""
        # NOTE(review): as in handleACLGrant, verb is bytes under
        # Python 3 while the comparisons below use str -- verify.
        words = request.command.split()
        verb = words[2]
        if self.acl is None:
            request.connection.sendRaw(b'ERR ACL_DISABLED\n')
            return
        subject = request.connection.ssl_subject
        try:
            if verb == 'invoke':
                self.acl.revokeInvoke(subject)
            elif verb == 'register':
                self.acl.revokeRegister(subject)
            elif verb == 'grant':
                self.acl.revokeGrant(subject)
            elif verb == 'all':
                try:
                    self.acl.remove(subject)
                except ACLError:
                    # Nothing to remove; treat as success.
                    pass
            else:
                request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
                return
        except ACLError as e:
            self.log.info("Error in self-revoke command: %s" % (e.message,))
            request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
            return
        request.connection.sendRaw(b'OK\n')
def _getFunctionStats(self):
    """Aggregate per-function statistics for the admin status command.

    Returns a dict mapping function name to a three-element list:
    ``[total_jobs, running_jobs, registered_workers]``.  Functions that
    currently have no jobs or workers still appear with all-zero counts
    if they are in ``self.functions``.
    """
    functions = {}
    # Seed with every function ever registered so idle ones show zeros.
    for function in self.functions:
        functions[function] = [0, 0, 0]
    # Count queued/running jobs per function.
    for job in self.jobs.values():
        if job.name not in functions:
            functions[job.name] = [0, 0, 0]
        functions[job.name][0] += 1
        if job.running:
            functions[job.name][1] += 1
    # Count how many connected workers can run each function.
    for connection in self.active_connections:
        for function in connection.functions:
            if function not in functions:
                functions[function] = [0, 0, 0]
            functions[function][2] += 1
    return functions
def handleStatus(self, request):
    """Reply to the admin ``status`` command.

    Emits one tab-separated line per function
    (name, total jobs, running jobs, available workers) followed by a
    terminating ``.`` line, mirroring gearmand's text protocol.
    """
    functions = self._getFunctionStats()
    for name, values in functions.items():
        request.connection.sendRaw(
            ("%s\t%s\t%s\t%s\n" %
             (name.decode('utf-8'), values[0], values[1],
              values[2])).encode('utf8'))
    request.connection.sendRaw(b'.\n')
def handleWorkers(self, request):
    """Reply to the admin ``workers`` command.

    Emits one line per active connection
    (``fd ip client_id : function ...``) followed by a terminating ``.``
    line.  Connections without a client id are shown as ``-``.
    """
    for connection in self.active_connections:
        fd = connection.conn.fileno()
        ip = connection.host
        client_id = connection.client_id or b'-'
        functions = b' '.join(connection.functions).decode('utf8')
        request.connection.sendRaw(("%s %s %s : %s\n" %
                                    (fd, ip, client_id.decode('utf8'),
                                     functions))
                                   .encode('utf8'))
    request.connection.sendRaw(b'.\n')
def wakeConnection(self, connection):
    """Wake a single sleeping worker connection with a NOOP packet.

    Does nothing if the connection is not currently in the SLEEP state.
    """
    noop = Packet(constants.RES, constants.NOOP, b'')
    if connection.state != 'SLEEP':
        return
    connection.changeState("AWAKE")
    connection.sendPacket(noop)
def wakeConnections(self, job=None):
    """Wake sleeping workers with a NOOP packet.

    With *job* given, only workers registered for that job's function are
    woken; with ``job=None`` every sleeping worker is woken.
    """
    noop = Packet(constants.RES, constants.NOOP, b'')
    for connection in self.active_connections:
        if connection.state != 'SLEEP':
            continue
        if job is not None and job.name not in connection.functions:
            continue
        connection.changeState("AWAKE")
        connection.sendPacket(noop)
def reportTimingStats(self, ptype, duration):
    """Record packet-handling timing in statsd, if statsd is configured.

    :param ptype: numeric packet type; translated to a name via
        ``constants.types`` (falls back to 'UNKNOWN').
    :param duration: handling time in seconds (statsd receives ms).
    """
    if not self.statsd:
        return
    ptype = constants.types.get(ptype, 'UNKNOWN')
    key = 'packet.%s' % ptype
    self.statsd.timing(key, int(duration * 1000))
    self.statsd.incr(key)
def _updateStats(self):
    """Push the current queue gauges to statsd, if statsd is configured."""
    if not self.statsd:
        return
    self.statsd.gauge('queue.total', self.total_jobs)
    self.statsd.gauge('queue.running', self.running_jobs)
    self.statsd.gauge('queue.waiting', self.waiting_jobs)
def _handleSubmitJob(self, packet, precedence, background=False):
    """Common implementation for every SUBMIT_JOB* packet.

    Creates a :class:`ServerJob` from the packet's (name, unique,
    arguments) triple, answers with JOB_CREATED, queues the job at the
    requested *precedence*, and wakes workers that can run it.
    Background jobs are not tied to the submitting client connection.
    """
    name = packet.getArgument(0)
    unique = packet.getArgument(1)
    if not unique:
        unique = None
    arguments = packet.getArgument(2, True)
    # ACL check: reject submissions the client is not allowed to invoke.
    if self.acl:
        if not self.acl.canInvoke(packet.connection.ssl_subject, name):
            self.log.info("Rejecting SUBMIT_JOB from %s for %s "
                          "due to ACL" %
                          (packet.connection.ssl_subject, name))
            self.sendError(packet.connection, 0,
                           'Permission denied by ACL')
            return
    # Handles are unique per server instance: H:<client-host>:<counter>.
    self.max_handle += 1
    handle = ('H:%s:%s' % (packet.connection.host,
                           self.max_handle)).encode('utf8')
    # Background jobs carry no client connection to report back to.
    if not background:
        conn = packet.connection
    else:
        conn = None
    job = ServerJob(handle, name, arguments, conn, unique)
    p = Packet(constants.RES, constants.JOB_CREATED, handle)
    packet.connection.sendPacket(p)
    self.jobs[handle] = job
    self.total_jobs += 1
    self.waiting_jobs += 1
    if not background:
        packet.connection.related_jobs[handle] = job
    if precedence == PRECEDENCE_HIGH:
        self.high_queue.append(job)
    elif precedence == PRECEDENCE_NORMAL:
        self.normal_queue.append(job)
    elif precedence == PRECEDENCE_LOW:
        self.low_queue.append(job)
    self._updateStats()
    self.wakeConnections(job)
# Thin SUBMIT_JOB dispatchers: each packet type maps to a precedence and
# a foreground/background mode on the shared _handleSubmitJob helper.
def handleSubmitJob(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_NORMAL)
def handleSubmitJobHigh(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_HIGH)
def handleSubmitJobLow(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_LOW)
def handleSubmitJobBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_NORMAL,
                                 background=True)
def handleSubmitJobHighBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_HIGH, background=True)
def handleSubmitJobLowBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_LOW, background=True)
def getJobForConnection(self, connection, peek=False):
    """Find the next queued job this worker connection can run.

    Scans the high, normal and low queues in priority order.  With
    ``peek=False`` the job is dequeued, bound to the connection and
    marked running (updating the job counters); with ``peek=True`` the
    queues are left untouched.  Returns the job, or None if no queued
    job matches the connection's registered functions.
    """
    for queue in [self.high_queue, self.normal_queue, self.low_queue]:
        for job in queue:
            if job.name in connection.functions:
                if not peek:
                    queue.remove(job)
                    connection.related_jobs[job.handle] = job
                    job.worker_connection = connection
                    job.running = True
                    self.waiting_jobs -= 1
                    self.running_jobs += 1
                    self._updateStats()
                return job
    return None
def handleGrabJobUniq(self, packet):
    """Answer GRAB_JOB_UNIQ: assign a queued job or reply NO_JOB."""
    assigned = self.getJobForConnection(packet.connection)
    if not assigned:
        self.sendNoJob(packet.connection)
    else:
        self.sendJobAssignUniq(packet.connection, assigned)
def sendJobAssignUniq(self, connection, job):
    """Send a JOB_ASSIGN_UNIQ packet for *job* to a worker connection.

    The payload is handle, name, unique id and arguments joined with NUL
    bytes; a job without a unique id contributes an empty field.
    """
    unique = job.binary_unique
    if not unique:
        unique = b''
    data = b'\x00'.join((job.handle, job.name, unique, job.arguments))
    p = Packet(constants.RES, constants.JOB_ASSIGN_UNIQ, data)
    connection.sendPacket(p)
def sendNoJob(self, connection):
    """Send an empty NO_JOB response to a worker connection."""
    p = Packet(constants.RES, constants.NO_JOB, b'')
    connection.sendPacket(p)
def handlePreSleep(self, packet):
    """Handle PRE_SLEEP: mark the worker asleep, but wake it immediately
    if a matching job is already queued (peek only, no dequeue)."""
    packet.connection.changeState("SLEEP")
    if self.getJobForConnection(packet.connection, peek=True):
        self.wakeConnection(packet.connection)
# WORK_* packets are forwarded to the submitting client; COMPLETE, FAIL
# and EXCEPTION additionally finish (remove) the job.
def handleWorkComplete(self, packet):
    self.handlePassthrough(packet, True)
def handleWorkFail(self, packet):
    self.handlePassthrough(packet, True)
def handleWorkException(self, packet):
    self.handlePassthrough(packet, True)
def handleWorkData(self, packet):
    self.handlePassthrough(packet)
def handleWorkWarning(self, packet):
    self.handlePassthrough(packet)
def handleWorkStatus(self, packet):
    """Handle WORK_STATUS: record the job's progress fraction and forward
    the packet to the submitting client.  Unknown handles are logged and
    dropped.
    """
    handle = packet.getArgument(0)
    job = self.jobs.get(handle)
    if not job:
        self.log.info("Received packet %s for unknown job" % (packet,))
        return
    job.numerator = packet.getArgument(1)
    job.denominator = packet.getArgument(2)
    self.handlePassthrough(packet)
def handlePassthrough(self, packet, finished=False):
    """Forward a worker's WORK_* packet to the submitting client.

    The packet's code is rewritten from REQ to RES before forwarding.
    Background jobs have no client connection, so their packets are
    simply dropped.  With ``finished=True`` the job is removed from the
    server's bookkeeping (it is already off the queues, hence
    ``dequeue=False``) and the stats gauges are refreshed.
    """
    handle = packet.getArgument(0)
    job = self.jobs.get(handle)
    if not job:
        self.log.info("Received packet %s for unknown job" % (packet,))
        return
    packet.code = constants.RES
    if job.client_connection:
        job.client_connection.sendPacket(packet)
    if finished:
        self._removeJob(job, dequeue=False)
        self._updateStats()
def handleSetClientID(self, packet):
    """Handle SET_CLIENT_ID: remember the ID on the sending connection."""
    packet.connection.client_id = packet.getArgument(0)
def sendError(self, connection, code, text):
    """Send an ERROR packet (NUL-terminated code and message fields)."""
    data = (str(code).encode('utf8') + b'\x00' +
            str(text).encode('utf8') + b'\x00')
    p = Packet(constants.RES, constants.ERROR, data)
    connection.sendPacket(p)
def handleCanDo(self, packet):
    """Handle CAN_DO: register a function on the worker connection.

    If an ACL is configured and forbids registration, the packet is
    silently ignored (only logged) -- no error is sent to the worker.
    The function is also added to the server-wide registry.
    """
    name = packet.getArgument(0)
    if self.acl:
        if not self.acl.canRegister(packet.connection.ssl_subject, name):
            self.log.info("Ignoring CAN_DO from %s for %s due to ACL" %
                          (packet.connection.ssl_subject, name))
            return
    self.log.debug("Adding function %s to %s" % (name, packet.connection))
    packet.connection.functions.add(name)
    self.functions.add(name)
def handleCantDo(self, packet):
    """Handle CANT_DO: unregister a function from the worker connection.

    Uses ``set.discard`` instead of ``set.remove`` so that a CANT_DO for
    a function the worker never registered (or already removed) is a
    harmless no-op rather than raising KeyError out of the packet
    handler.  The server-wide ``self.functions`` registry is left
    untouched, matching the original behavior.
    """
    name = packet.getArgument(0)
    self.log.debug("Removing function %s from %s" %
                   (name, packet.connection))
    packet.connection.functions.discard(name)
def handleResetAbilities(self, packet):
    """Handle RESET_ABILITIES: drop every function registration for the
    sending worker.  The server-wide function registry is left unchanged.
    """
    self.log.debug("Resetting functions for %s" % packet.connection)
    packet.connection.functions = set()
def handleGetStatus(self, packet):
    """Handle GET_STATUS: reply with a STATUS_RES packet for a handle.

    The response fields (NUL-separated) are: handle, known flag,
    running flag, numerator, denominator.  Unknown handles yield
    known=0 with empty progress fields.
    """
    handle = packet.getArgument(0)
    self.log.debug("Getting status for %s" % handle)
    known = 0
    running = 0
    numerator = b''
    denominator = b''
    job = self.jobs.get(handle)
    if job:
        known = 1
        if job.running:
            running = 1
        # Progress may not have been reported yet; fall back to empty.
        numerator = job.numerator or b''
        denominator = job.denominator or b''
    data = (handle + b'\x00' +
            str(known).encode('utf8') + b'\x00' +
            str(running).encode('utf8') + b'\x00' +
            numerator + b'\x00' +
            denominator)
    p = Packet(constants.RES, constants.STATUS_RES, data)
    packet.connection.sendPacket(p)
| true | true |
f7fc7740848542dfa85c88d49022ec626b2b3b3f | 849 | py | Python | com/install/configTemp/MysqlConfigTemp.py | hao707822882/Bichon | 54092e69c9316ee592ee392dc85e1f7fd0c47b68 | [
"Apache-2.0"
] | null | null | null | com/install/configTemp/MysqlConfigTemp.py | hao707822882/Bichon | 54092e69c9316ee592ee392dc85e1f7fd0c47b68 | [
"Apache-2.0"
] | null | null | null | com/install/configTemp/MysqlConfigTemp.py | hao707822882/Bichon | 54092e69c9316ee592ee392dc85e1f7fd0c47b68 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# --coding:utf-8--
# coding: utf-8
# ━━━━━━神兽出没━━━━━━
# ┏┓ ┏┓
# ┏┛┻━━━┛┻┓
# ┃ ┃
# ┃ ━ ┃
# ┃ ┳┛ ┗┳ ┃
# ┃ ┃
# ┃ ┻ ┃
# ┃ ┃
# ┗━┓ ┏━┛
# ┃ ┃神兽保佑, 永无BUG!
# ┃ ┃Code is far away from bug with the animal protecting
# ┃ ┗━━━┓
# ┃ ┣┓
# ┃ ┏┛
# ┗┓┓┏━┳┓┏┛
# ┃┫┫ ┃┫┫
# ┗┻┛ ┗┻┛
# ━━━━━━感觉萌萌哒━━━━━━
# Module Desc:clover
# User: z.mm | 2428922347@qq.com
# Date: 2016/1/5
# Time: 17:33
__author__ = 'Administrator'
class MysqlConfigTemp(object):
    """Template for a minimal ``my.cnf`` enforcing a UTF-8 server charset.

    Bug fix: the original template placed ``default-character-set=utf8``
    under ``[mysqld]``.  That option is only valid in client sections
    (``[client]``/``[mysql]``); under ``[mysqld]`` it makes MySQL 5.5+
    refuse to start, so it has been removed.  ``character-set-server``
    already provides the intended server-side default.
    """
    temp = '''
[mysqld]
character-set-server=utf8
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
symbolic-links=0
[mysqld_safe]
character-set-server=utf8
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
'''
| 18.06383 | 64 | 0.479388 |
__author__ = 'Administrator'
class MysqlConfigTemp(object):
    """Minimal ``my.cnf`` template forcing a UTF-8 server character set."""
    # NOTE(review): 'default-character-set' under [mysqld] is a client-side
    # option and makes mysqld 5.5+ refuse to start -- confirm it is intended
    # here (character-set-server already sets the server default).
    temp = '''
[mysqld]
character-set-server=utf8
default-character-set=utf8
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
symbolic-links=0
[mysqld_safe]
character-set-server=utf8
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
'''
| true | true |
f7fc77711d138e48524e60ec2c8ebc732c2f81ad | 1,305 | py | Python | tcp_echo_client.py | jlim262/py-socket-programming | 87f5b655f9275fc75d19a999a0cd399e8bb48cab | [
"MIT"
] | null | null | null | tcp_echo_client.py | jlim262/py-socket-programming | 87f5b655f9275fc75d19a999a0cd399e8bb48cab | [
"MIT"
] | null | null | null | tcp_echo_client.py | jlim262/py-socket-programming | 87f5b655f9275fc75d19a999a0cd399e8bb48cab | [
"MIT"
] | null | null | null | import socket
import sys
import argparse
host = 'localhost'
def echo_client(port):
""" A simple echo client """
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the server
print(f"Connecting to {host} port {port}")
sock.connect((host, port))
# Send data
try:
# Send data
message = "Test message. This will be echoed"
print(f"Sending {message}")
sock.sendall(message.encode('utf-8'))
# Look for the response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = sock.recv(16)
amount_received += len(data)
print(f"Received: {data.decode()}")
except socket.error as e:
print(f"Socket error: {str(e)}")
except Exception as e:
print(f"Other exception: {str(e)}")
finally:
print("Closing connection to the server")
sock.close()
if __name__ == '__main__':
    # Parse the required --port argument and run the client once.
    parser = argparse.ArgumentParser(description='Socket Server Example')
    parser.add_argument('--port', action="store",
                        dest="port", type=int, required=True)
    given_args = parser.parse_args()
    port = given_args.port
    echo_client(port)
| 29 | 73 | 0.616858 | import socket
import sys
import argparse
host = 'localhost'
def echo_client(port):
    # A simple TCP echo client: connect, send a fixed message, read it back.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(f"Connecting to {host} port {port}")
    sock.connect((host, port))
    try:
        message = "Test message. This will be echoed"
        print(f"Sending {message}")
        sock.sendall(message.encode('utf-8'))
        amount_received = 0
        amount_expected = len(message)
        # NOTE(review): if the server closes early, recv() returns b'' and
        # this loop never terminates; an `if not data: break` guard would
        # be needed to handle that case.
        while amount_received < amount_expected:
            data = sock.recv(16)
            amount_received += len(data)
            print(f"Received: {data.decode()}")
    except socket.error as e:
        print(f"Socket error: {str(e)}")
    except Exception as e:
        print(f"Other exception: {str(e)}")
    finally:
        print("Closing connection to the server")
        sock.close()
if __name__ == '__main__':
    # Parse the required --port argument and run the client once.
    parser = argparse.ArgumentParser(description='Socket Server Example')
    parser.add_argument('--port', action="store",
                        dest="port", type=int, required=True)
    given_args = parser.parse_args()
    port = given_args.port
    echo_client(port)
| true | true |
f7fc77bfa401cc402649eb5cd805955a38db3e6e | 492 | py | Python | Python/LPTHW/ex39.py | vanonselenp/Learning | 477239defa81a1d4435c72e1cde151ac32160e39 | [
"MIT"
] | null | null | null | Python/LPTHW/ex39.py | vanonselenp/Learning | 477239defa81a1d4435c72e1cde151ac32160e39 | [
"MIT"
] | null | null | null | Python/LPTHW/ex39.py | vanonselenp/Learning | 477239defa81a1d4435c72e1cde151ac32160e39 | [
"MIT"
] | null | null | null | person = {'name': 'Peter', 'age': 29}
person['city'] = 'Cape Town'
print person
print person['name']
del person['city']
print person
states = {
'Oregon': 'OR',
'Florida': 'FL',
'California': 'CA',
'New York': 'NY',
'Michigan': 'MI',
}
cities = {
'OR':'Portland',
'FL':'Jacksonville',
'CA':'San Francisco',
'NY':'New York City',
'MI':'Detroit',
'NY':'New Jersey'
}
print cities['NY']
print cities
for abreviation, city in cities.items():
print '%s %s' % (abreviation, city)
# Basic dictionary operations (Python 2 print statements).
person = {'name': 'Peter', 'age': 29}
person['city'] = 'Cape Town'
print person
print person['name']
del person['city']
print person
# US state name -> postal abbreviation.
states = {
    'Oregon': 'OR',
    'Florida': 'FL',
    'California': 'CA',
    'New York': 'NY',
    'Michigan': 'MI',
}
# Abbreviation -> city.
# NOTE(review): 'NY' appears twice below; the second literal ('New
# Jersey') silently overwrites 'New York City' -- likely meant 'NJ'.
cities = {
    'OR':'Portland',
    'FL':'Jacksonville',
    'CA':'San Francisco',
    'NY':'New York City',
    'MI':'Detroit',
    'NY':'New Jersey'
}
print cities['NY']
print cities
for abreviation, city in cities.items():
    print '%s %s' % (abreviation, city)
| false | true |
f7fc786cd3dadfd413da87f87fb25b57d75ff278 | 719 | py | Python | hubspot/auth/oauth/models/__init__.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/auth/oauth/models/__init__.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/auth/oauth/models/__init__.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
# flake8: noqa
"""
OAuthService
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from hubspot.auth.oauth.models.access_token_info_response import AccessTokenInfoResponse
from hubspot.auth.oauth.models.error import Error
from hubspot.auth.oauth.models.error_detail import ErrorDetail
from hubspot.auth.oauth.models.refresh_token_info_response import RefreshTokenInfoResponse
from hubspot.auth.oauth.models.token_response_if import TokenResponseIF
| 32.681818 | 124 | 0.815021 |
from __future__ import absolute_import
from hubspot.auth.oauth.models.access_token_info_response import AccessTokenInfoResponse
from hubspot.auth.oauth.models.error import Error
from hubspot.auth.oauth.models.error_detail import ErrorDetail
from hubspot.auth.oauth.models.refresh_token_info_response import RefreshTokenInfoResponse
from hubspot.auth.oauth.models.token_response_if import TokenResponseIF
| true | true |
f7fc79bd981c6ee38a3ec8c61da89a9e6b7045d4 | 7,285 | py | Python | google/ads/google_ads/v4/proto/enums/hotel_placeholder_field_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/enums/hotel_placeholder_field_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/enums/hotel_placeholder_field_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/hotel_placeholder_field.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/hotel_placeholder_field.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\033HotelsPlaceholderFieldProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nAgoogle/ads/googleads_v4/proto/enums/hotel_placeholder_field.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\xcd\x03\n\x19HotelPlaceholderFieldEnum\"\xaf\x03\n\x15HotelPlaceholderField\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0f\n\x0bPROPERTY_ID\x10\x02\x12\x11\n\rPROPERTY_NAME\x10\x03\x12\x14\n\x10\x44\x45STINATION_NAME\x10\x04\x12\x0f\n\x0b\x44\x45SCRIPTION\x10\x05\x12\x0b\n\x07\x41\x44\x44RESS\x10\x06\x12\t\n\x05PRICE\x10\x07\x12\x13\n\x0f\x46ORMATTED_PRICE\x10\x08\x12\x0e\n\nSALE_PRICE\x10\t\x12\x18\n\x14\x46ORMATTED_SALE_PRICE\x10\n\x12\r\n\tIMAGE_URL\x10\x0b\x12\x0c\n\x08\x43\x41TEGORY\x10\x0c\x12\x0f\n\x0bSTAR_RATING\x10\r\x12\x17\n\x13\x43ONTEXTUAL_KEYWORDS\x10\x0e\x12\x0e\n\nFINAL_URLS\x10\x0f\x12\x15\n\x11\x46INAL_MOBILE_URLS\x10\x10\x12\x10\n\x0cTRACKING_URL\x10\x11\x12\x14\n\x10\x41NDROID_APP_LINK\x10\x12\x12\x18\n\x14SIMILAR_PROPERTY_IDS\x10\x13\x12\x10\n\x0cIOS_APP_LINK\x10\x14\x12\x14\n\x10IOS_APP_STORE_ID\x10\x15\x42\xf0\x01\n!com.google.ads.googleads.v4.enumsB\x1bHotelsPlaceholderFieldProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD = _descriptor.EnumDescriptor(
name='HotelPlaceholderField',
full_name='google.ads.googleads.v4.enums.HotelPlaceholderFieldEnum.HotelPlaceholderField',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPERTY_ID', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPERTY_NAME', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESTINATION_NAME', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESCRIPTION', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADDRESS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMATTED_PRICE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SALE_PRICE', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMATTED_SALE_PRICE', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_URL', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CATEGORY', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STAR_RATING', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONTEXTUAL_KEYWORDS', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_URLS', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_MOBILE_URLS', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRACKING_URL', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANDROID_APP_LINK', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIMILAR_PROPERTY_IDS', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_LINK', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_STORE_ID', index=21, number=21,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=161,
serialized_end=592,
)
_sym_db.RegisterEnumDescriptor(_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD)
_HOTELPLACEHOLDERFIELDENUM = _descriptor.Descriptor(
name='HotelPlaceholderFieldEnum',
full_name='google.ads.googleads.v4.enums.HotelPlaceholderFieldEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=592,
)
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD.containing_type = _HOTELPLACEHOLDERFIELDENUM
DESCRIPTOR.message_types_by_name['HotelPlaceholderFieldEnum'] = _HOTELPLACEHOLDERFIELDENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HotelPlaceholderFieldEnum = _reflection.GeneratedProtocolMessageType('HotelPlaceholderFieldEnum', (_message.Message,), dict(
DESCRIPTOR = _HOTELPLACEHOLDERFIELDENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.hotel_placeholder_field_pb2'
,
__doc__ = """Values for Hotel placeholder fields. For more information about dynamic
remarketing feeds, see
https://support.google.com/google-ads/answer/6053288.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.HotelPlaceholderFieldEnum)
))
_sym_db.RegisterMessage(HotelPlaceholderFieldEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41.392045 | 1,332 | 0.758133 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/hotel_placeholder_field.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\033HotelsPlaceholderFieldProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nAgoogle/ads/googleads_v4/proto/enums/hotel_placeholder_field.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\xcd\x03\n\x19HotelPlaceholderFieldEnum\"\xaf\x03\n\x15HotelPlaceholderField\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0f\n\x0bPROPERTY_ID\x10\x02\x12\x11\n\rPROPERTY_NAME\x10\x03\x12\x14\n\x10\x44\x45STINATION_NAME\x10\x04\x12\x0f\n\x0b\x44\x45SCRIPTION\x10\x05\x12\x0b\n\x07\x41\x44\x44RESS\x10\x06\x12\t\n\x05PRICE\x10\x07\x12\x13\n\x0f\x46ORMATTED_PRICE\x10\x08\x12\x0e\n\nSALE_PRICE\x10\t\x12\x18\n\x14\x46ORMATTED_SALE_PRICE\x10\n\x12\r\n\tIMAGE_URL\x10\x0b\x12\x0c\n\x08\x43\x41TEGORY\x10\x0c\x12\x0f\n\x0bSTAR_RATING\x10\r\x12\x17\n\x13\x43ONTEXTUAL_KEYWORDS\x10\x0e\x12\x0e\n\nFINAL_URLS\x10\x0f\x12\x15\n\x11\x46INAL_MOBILE_URLS\x10\x10\x12\x10\n\x0cTRACKING_URL\x10\x11\x12\x14\n\x10\x41NDROID_APP_LINK\x10\x12\x12\x18\n\x14SIMILAR_PROPERTY_IDS\x10\x13\x12\x10\n\x0cIOS_APP_LINK\x10\x14\x12\x14\n\x10IOS_APP_STORE_ID\x10\x15\x42\xf0\x01\n!com.google.ads.googleads.v4.enumsB\x1bHotelsPlaceholderFieldProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD = _descriptor.EnumDescriptor(
name='HotelPlaceholderField',
full_name='google.ads.googleads.v4.enums.HotelPlaceholderFieldEnum.HotelPlaceholderField',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPERTY_ID', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPERTY_NAME', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESTINATION_NAME', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESCRIPTION', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADDRESS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMATTED_PRICE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SALE_PRICE', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMATTED_SALE_PRICE', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_URL', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CATEGORY', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STAR_RATING', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONTEXTUAL_KEYWORDS', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_URLS', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_MOBILE_URLS', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRACKING_URL', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANDROID_APP_LINK', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIMILAR_PROPERTY_IDS', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_LINK', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IOS_APP_STORE_ID', index=21, number=21,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=161,
serialized_end=592,
)
_sym_db.RegisterEnumDescriptor(_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD)
_HOTELPLACEHOLDERFIELDENUM = _descriptor.Descriptor(
name='HotelPlaceholderFieldEnum',
full_name='google.ads.googleads.v4.enums.HotelPlaceholderFieldEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=592,
)
_HOTELPLACEHOLDERFIELDENUM_HOTELPLACEHOLDERFIELD.containing_type = _HOTELPLACEHOLDERFIELDENUM
DESCRIPTOR.message_types_by_name['HotelPlaceholderFieldEnum'] = _HOTELPLACEHOLDERFIELDENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HotelPlaceholderFieldEnum = _reflection.GeneratedProtocolMessageType('HotelPlaceholderFieldEnum', (_message.Message,), dict(
DESCRIPTOR = _HOTELPLACEHOLDERFIELDENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.hotel_placeholder_field_pb2'
,
__doc__ = """Values for Hotel placeholder fields. For more information about dynamic
remarketing feeds, see
https://support.google.com/google-ads/answer/6053288.
""",
))
_sym_db.RegisterMessage(HotelPlaceholderFieldEnum)
DESCRIPTOR._options = None
| true | true |
f7fc7a2dcaf432d5f99d19955e605c076ead8c11 | 510 | py | Python | Blogg/admin.py | waregagbagbo/Blog | aaafcf454abbf9a792227e4a6f8d24da62dae27d | [
"MIT"
] | null | null | null | Blogg/admin.py | waregagbagbo/Blog | aaafcf454abbf9a792227e4a6f8d24da62dae27d | [
"MIT"
] | null | null | null | Blogg/admin.py | waregagbagbo/Blog | aaafcf454abbf9a792227e4a6f8d24da62dae27d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import*
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for the Post model."""
    # Columns shown on the change-list page.
    list_display =('title', 'slug', 'status','created_on')
    # Sidebar filter by publication status.
    list_filter = ('status',)
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {"slug":('title',)}
class BookAdmin(admin.ModelAdmin):
    """Django admin configuration for the Book model."""
    # Columns shown on the change-list page.
    list_display = ('name','image','author','price')
# Register the blog models with the admin site, attaching the custom
# ModelAdmin classes where one is defined.
admin.site.register(Profile)
admin.site.register(Post,PostAdmin)
admin.site.register(Category)
admin.site.register(Book,BookAdmin)
admin.site.register(Author)
| 24.285714 | 58 | 0.72549 | from django.contrib import admin
from .models import*
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for the Post model."""
    list_display =('title', 'slug', 'status','created_on')
    list_filter = ('status',)
    prepopulated_fields = {"slug":('title',)}
class BookAdmin(admin.ModelAdmin):
    """Django admin configuration for the Book model."""
    list_display = ('name','image','author','price')
# Register the blog models with the admin site.
admin.site.register(Profile)
admin.site.register(Post,PostAdmin)
admin.site.register(Category)
admin.site.register(Book,BookAdmin)
admin.site.register(Author)
| true | true |
f7fc7a2fcd8674466b1654e8053c9a78f1a0eb61 | 16,676 | py | Python | evidently/tests/test_readme_examples.py | jim-fun/evidently | eb3479b8ce39e43601fb2d1ffbf61e0624541865 | [
"Apache-2.0"
] | null | null | null | evidently/tests/test_readme_examples.py | jim-fun/evidently | eb3479b8ce39e43601fb2d1ffbf61e0624541865 | [
"Apache-2.0"
] | null | null | null | evidently/tests/test_readme_examples.py | jim-fun/evidently | eb3479b8ce39e43601fb2d1ffbf61e0624541865 | [
"Apache-2.0"
] | null | null | null | import json
import pandas as pd
import numpy as np
from sklearn import datasets
from unittest import TestCase
from evidently import ColumnMapping
from evidently.dashboard import Dashboard
from evidently.model_profile import Profile
from evidently.profile_sections import DataDriftProfileSection, CatTargetDriftProfileSection, \
RegressionPerformanceProfileSection, ClassificationPerformanceProfileSection, \
ProbClassificationPerformanceProfileSection
from evidently.tabs import DataDriftTab, RegressionPerformanceTab, CatTargetDriftTab, ClassificationPerformanceTab, \
ProbClassificationPerformanceTab
def _get_iris():
    """Load the iris dataset and return it alongside a DataFrame copy.

    No ``setUp`` fixture is used by the tests to avoid side effects /
    shared state between them, so each test loads its own copy.

    Returns the raw sklearn bunch plus a DataFrame whose feature columns
    are named after ``feature_names`` and that carries the integer class
    labels in a ``target`` column.
    """
    bunch = datasets.load_iris()
    frame = pd.DataFrame(bunch.data, columns=bunch.feature_names)
    frame['target'] = bunch.target
    return bunch, frame
def _get_probabilistic_iris():
    """Build an iris reference frame with per-class predicted probabilities.

    Attaches one probability column per class name (the three columns sum
    to 1 in each row) and returns the merged frame together with a
    ColumnMapping describing target, prediction columns and numerical
    features.

    The probabilities are random; a fixed seed is used so repeated test
    runs see identical fixture data (the original drew from the global
    unseeded RNG, which made the fixture non-reproducible).
    """
    iris = datasets.load_iris()
    iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    rng = np.random.default_rng(42)  # seeded for reproducible tests
    random_probs = rng.random((3, 150))
    # Normalize each column so the three class probabilities sum to 1.
    random_probs = (random_probs / random_probs.sum(0))
    pred_df = pd.DataFrame(random_probs.T, columns=iris.target_names)
    iris_frame['target'] = iris.target_names[iris['target']]
    merged_reference = pd.concat([iris_frame, pred_df], axis=1)
    iris_column_mapping = ColumnMapping()
    iris_column_mapping.target = 'target'
    iris_column_mapping.prediction = iris.target_names.tolist()
    iris_column_mapping.numerical_features = iris.feature_names
    return merged_reference, iris_column_mapping
class TestDashboards(TestCase):
    """Smoke tests for the Dashboard examples shown in README.md.

    Each test builds a Dashboard over the iris dataset and checks the
    structure of the private ``_json()`` output (top-level ``name`` key and
    the number of widgets) rather than the rendered HTML.
    """

    # TODO(fixme): Actually we would like to test html's output, but because
    # evidently/nbextension/static/index.js is missing
    # (and evidently/nbextension/static/index.js.LICENSE.txt is an actual text file)
    # saving an html report in the test itself fails.
    # A reasonable fallback is to use the private _json() method. Although, since it is never used anywhere else
    # it may be considered a bad testing practice to have methods only for testing purposes.
    # For now we stick to it until something better comes along.
    def setUp(self) -> None:
        # Fresh frame per test: several tests mutate it (add 'prediction').
        iris = datasets.load_iris()
        self.iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
        self.iris_frame['target'] = iris.target
        self.iris_targets = iris.target_names

    ###
    # The following are extracted from the README.md file.
    ###
    def test_data_drift_dashboard(self):
        # To generate the **Data Drift** report, run:
        iris_data_drift_report = Dashboard(tabs=[DataDriftTab()])
        iris_data_drift_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
        actual = json.loads(iris_data_drift_report._json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('name' in actual)
        self.assertTrue(len(actual['widgets']) == 1)

    def test_data_drift_categorical_target_drift_dashboard(self):
        # To generate the **Data Drift** and the **Categorical Target Drift** reports, run:
        iris_data_and_target_drift_report = Dashboard(tabs=[DataDriftTab(), CatTargetDriftTab()])
        iris_data_and_target_drift_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
        actual = json.loads(iris_data_and_target_drift_report._json())
        self.assertTrue('name' in actual)
        self.assertTrue(len(actual['widgets']) == 3)

    def test_regression_performance_dashboard(self):
        # To generate the **Regression Model Performance** report, run:
        # FIXME: when prediction column is not present in the dataset
        # ValueError: [Widget Regression Model Performance Report.] self.wi is None,
        # no data available (forget to set it in widget?)
        # NOTE(review): assigning a reversed *Series* re-aligns on the index,
        # so 'prediction' likely ends up identical to 'target' (pandas aligns
        # on labels, not position). Use `.values[::-1]` if a genuinely
        # imperfect prediction was intended -- confirm against README.
        self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
        regression_model_performance = Dashboard(tabs=[RegressionPerformanceTab()])
        regression_model_performance.calculate(self.iris_frame[:100], self.iris_frame[100:])
        actual = json.loads(regression_model_performance._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 20)

    def test_regression_performance_single_frame_dashboard(self):
        # You can also generate a **Regression Model Performance** for a single `DataFrame`. In this case, run:
        # FIXME: when prediction column is not present in the dataset
        # ValueError: [Widget Regression Model Performance Report.] self.wi is None,
        # no data available (forget to set it in widget?)
        self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
        regression_single_model_performance = Dashboard(tabs=[RegressionPerformanceTab()])
        # Passing None as the current frame produces a reference-only report.
        regression_single_model_performance.calculate(self.iris_frame, None)
        actual = json.loads(regression_single_model_performance._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 12)

    def test_classification_performance_dashboard(self):
        # To generate the **Classification Model Performance** report, run:
        # FIXME: when prediction column is not present in the dataset
        # ValueError: [Widget Classification Model Performance Report.] self.wi is None,
        # no data available (forget to set it in widget?)
        self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
        classification_performance_report = Dashboard(tabs=[ClassificationPerformanceTab()])
        classification_performance_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
        actual = json.loads(classification_performance_report._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 10)

    def test_probabilistic_classification_performance_dashboard(self):
        # For **Probabilistic Classification Model Performance** report, run:
        # Random class probabilities, normalised so each column of the
        # (3, 150) array -- i.e. each sample -- sums to 1.
        random_probs = np.random.random((3, 150))
        random_probs = (random_probs / random_probs.sum(0))
        pred_df = pd.DataFrame(random_probs.T, columns=self.iris_targets)
        iris_frame = pd.concat([self.iris_frame, pred_df], axis=1)
        # Replace integer labels with class-name strings to match the
        # probability column names.
        iris_frame['target'] = self.iris_targets[self.iris_frame['target']]
        iris_column_mapping = ColumnMapping()
        iris_column_mapping.prediction = self.iris_targets
        classification_performance_report = Dashboard(tabs=[ProbClassificationPerformanceTab()])
        classification_performance_report.calculate(iris_frame, iris_frame, iris_column_mapping)
        actual = json.loads(classification_performance_report._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 20)

    def test_classification_performance_on_single_frame_dashboard(self):
        # You can also generate either of the **Classification** reports for a single `DataFrame`. In this case, run:
        self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
        classification_single_frame_performance = Dashboard(tabs=[ClassificationPerformanceTab()])
        classification_single_frame_performance.calculate(self.iris_frame, None)
        actual = json.loads(classification_single_frame_performance._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 9)

    def test_probabilistic_classification_performance_on_single_frame_dashboard(self):
        # You can also generate either of the **Classification** reports for a single `DataFrame`. In this case, run:
        # FIXME: like above, when prediction column is not present in the dataset
        random_probs = np.random.random((3, 150))
        random_probs = (random_probs / random_probs.sum(0))
        pred_df = pd.DataFrame(random_probs.T, columns=self.iris_targets)
        iris_frame = pd.concat([self.iris_frame, pred_df], axis=1)
        iris_frame['target'] = self.iris_targets[self.iris_frame['target']]
        iris_column_mapping = ColumnMapping()
        iris_column_mapping.prediction = self.iris_targets
        prob_classification_single_frame_performance = Dashboard(tabs=[ProbClassificationPerformanceTab()])
        prob_classification_single_frame_performance.calculate(iris_frame, None, iris_column_mapping)
        actual = json.loads(prob_classification_single_frame_performance._json())
        self.assertTrue('name' in actual)
        self.assertEqual(len(actual['widgets']), 11)
class TestProfiles(TestCase):
    """Smoke tests for the Profile (JSON report) examples shown in README.md.

    Each test builds a Profile over the iris dataset and checks the shape of
    the parsed JSON output (presence of 'timestamp', section key counts).
    """

    ###
    # The following are extracted from the README.md file.
    ###
    def test_data_drift_profile(self):
        # To generate the **Data Drift** report, run:
        iris, iris_frame = _get_iris()
        # iris.target is a numpy array, so [::-1] genuinely reverses it here
        # (no index alignment, unlike the Series version in TestDashboards).
        iris_frame['prediction'] = iris.target[::-1]
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        actual = json.loads(iris_data_drift_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertEqual(len(actual), 2)
        self.assertEqual(len(actual['data_drift']['data']), 6)
        self.assertTrue('metrics' in actual['data_drift']['data'])

    def test_data_drift_categorical_target_drift_profile(self):
        # To generate the **Data Drift** and the **Categorical Target Drift** reports, run:
        iris, iris_frame = _get_iris()
        iris_frame['prediction'] = iris.target[::-1]
        # NOTE(review): this profile is computed but never asserted on --
        # looks like a copy-paste leftover; confirm before removing.
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        iris_target_and_data_drift_profile = Profile(
            sections=[DataDriftProfileSection(), CatTargetDriftProfileSection()])
        iris_target_and_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        actual = json.loads(iris_target_and_data_drift_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertTrue(len(actual) == 3)
        self.assertEqual(len(actual['data_drift']['data']), 6)
        self.assertEqual(len(actual['cat_target_drift']['data']), 5)
        self.assertTrue(actual['data_drift']['data'].get('metrics'))

    def test_regression_performance_profile(self):
        # To generate the **Regression Model Performance** report, run:
        # NOTE(review): this test is identical to
        # test_regression_performance_single_frame_profile below (both pass
        # None as the current frame) -- presumably this one was meant to use
        # a reference/current split; confirm against the README example.
        iris, iris_frame = _get_iris()
        iris_frame['prediction'] = iris.target[::-1]
        # NOTE(review): unused drift profile; never asserted on.
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        regression_single_model_performance = Profile(sections=[RegressionPerformanceProfileSection()])
        regression_single_model_performance.calculate(iris_frame, None)
        actual = json.loads(regression_single_model_performance.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertTrue(len(actual) == 2)
        self.assertTrue(len(actual['regression_performance']['data']) == 5)
        self.assertTrue(actual['regression_performance']['data'].get('metrics'))

    def test_regression_performance_single_frame_profile(self):
        # You can also generate a **Regression Model Performance** for a single `DataFrame`. In this case, run:
        iris, iris_frame = _get_iris()
        iris_frame['prediction'] = iris.target[::-1]
        # NOTE(review): unused drift profile; never asserted on.
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        regression_single_model_performance = Profile(sections=[RegressionPerformanceProfileSection()])
        regression_single_model_performance.calculate(iris_frame, None)
        actual = json.loads(regression_single_model_performance.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertTrue(len(actual) == 2)
        self.assertTrue(len(actual['regression_performance']['data']) == 5)
        self.assertTrue(actual['regression_performance']['data'].get('metrics'))

    def test_classification_performance_profile(self):
        # To generate the **Classification Model Performance** report, run:
        iris, iris_frame = _get_iris()
        iris_frame['prediction'] = iris.target[::-1]
        # NOTE(review): unused drift profile; never asserted on.
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        classification_performance_profile = Profile(sections=[ClassificationPerformanceProfileSection()])
        classification_performance_profile.calculate(iris_frame[:100], iris_frame[100:])
        actual = json.loads(classification_performance_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertTrue(len(actual) == 2)
        self.assertTrue(len(actual['classification_performance']['data']) == 5)
        self.assertTrue(actual['classification_performance']['data'].get('metrics'))

    def test_classification_performance_single_profile(self):
        iris, iris_frame = _get_iris()
        iris_frame['prediction'] = iris.target[::-1]
        # NOTE(review): unused drift profile; never asserted on.
        iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
        iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
        classification_performance_profile = Profile(sections=[ClassificationPerformanceProfileSection()])
        classification_performance_profile.calculate(iris_frame[:100], None)
        actual = json.loads(classification_performance_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertTrue(len(actual) == 2)
        self.assertTrue(len(actual['classification_performance']['data']) == 5)
        self.assertTrue(actual['classification_performance']['data'].get('metrics'))

    def test_probabilistic_classification_performance_profile(self):
        # For **Probabilistic Classification Model Performance** report, run:
        merged_reference, column_mapping = _get_probabilistic_iris()
        iris_prob_classification_profile = Profile(sections=[ProbClassificationPerformanceProfileSection()])
        iris_prob_classification_profile.calculate(merged_reference, merged_reference, column_mapping)
        # FIXME: this does not work! why?
        # iris_prob_classification_profile.calculate(merged_reference[:100], merged_reference[100:],
        #                                            column_mapping = iris_column_mapping)
        actual = json.loads(iris_prob_classification_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertEqual(len(actual), 2)
        self.assertEqual(len(actual['probabilistic_classification_performance']['data']), 5)
        # Two metric sets: one for the reference frame, one for the current.
        self.assertEqual(len(actual['probabilistic_classification_performance']['data']['metrics']), 2)
        self.assertTrue('reference' in actual['probabilistic_classification_performance']['data']['metrics'])
        self.assertTrue('current' in actual['probabilistic_classification_performance']['data']['metrics'])

    def test_probabilistic_classification_single_performance_profile(self):
        # For **Probabilistic Classification Model Performance** report, run:
        merged_reference, column_mapping = _get_probabilistic_iris()
        iris_prob_classification_profile = Profile(sections=[ProbClassificationPerformanceProfileSection()])
        iris_prob_classification_profile.calculate(merged_reference, None, column_mapping)
        # FIXME: this does not work! why?
        # iris_prob_classification_profile.calculate(merged_reference[:100], None,
        #                                            column_mapping = iris_column_mapping)
        actual = json.loads(iris_prob_classification_profile.json())
        # we leave the actual content test to other tests for widgets
        self.assertTrue('timestamp' in actual)
        self.assertEqual(len(actual), 2)
        self.assertEqual(len(actual['probabilistic_classification_performance']['data']), 5)
        # Single-frame run: only the 'reference' metric set is produced.
        self.assertEqual(len(actual['probabilistic_classification_performance']['data']['metrics']), 1)
        self.assertTrue('reference' in actual['probabilistic_classification_performance']['data']['metrics'])
| 56.914676 | 117 | 0.720197 | import json
import pandas as pd
import numpy as np
from sklearn import datasets
from unittest import TestCase
from evidently import ColumnMapping
from evidently.dashboard import Dashboard
from evidently.model_profile import Profile
from evidently.profile_sections import DataDriftProfileSection, CatTargetDriftProfileSection, \
RegressionPerformanceProfileSection, ClassificationPerformanceProfileSection, \
ProbClassificationPerformanceProfileSection
from evidently.tabs import DataDriftTab, RegressionPerformanceTab, CatTargetDriftTab, ClassificationPerformanceTab, \
ProbClassificationPerformanceTab
def _get_iris():
iris = datasets.load_iris()
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
iris_frame['target'] = iris.target
return iris, iris_frame
def _get_probabilistic_iris():
iris = datasets.load_iris()
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
random_probs = np.random.random((3, 150))
random_probs = (random_probs / random_probs.sum(0))
pred_df = pd.DataFrame(random_probs.T, columns=iris.target_names)
iris_frame['target'] = iris.target_names[iris['target']]
merged_reference = pd.concat([iris_frame, pred_df], axis=1)
iris_column_mapping = ColumnMapping()
iris_column_mapping.target = 'target'
iris_column_mapping.prediction = iris.target_names.tolist()
iris_column_mapping.numerical_features = iris.feature_names
return merged_reference, iris_column_mapping
class TestDashboards(TestCase):
# evidently/nbextension/static/index.js is missing
# (and evidently/nbextension/static/index.js.LICENSE.txt is an actual text file)
# saving an html report in the test itself fails.
# A reasonable fallback is to use the private _json() method. Although, since it is never used anywhere else
# it may be considered a bad testing practice to have methods only for testing purposes.
# For now we stick to it until something better comes along.
def setUp(self) -> None:
iris = datasets.load_iris()
self.iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
self.iris_frame['target'] = iris.target
self.iris_targets = iris.target_names
###
# The following are extracted from the README.md file.
###
def test_data_drift_dashboard(self):
# To generate the **Data Drift** report, run:
iris_data_drift_report = Dashboard(tabs=[DataDriftTab()])
iris_data_drift_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
actual = json.loads(iris_data_drift_report._json())
# we leave the actual content test to other tests for widgets
self.assertTrue('name' in actual)
self.assertTrue(len(actual['widgets']) == 1)
def test_data_drift_categorical_target_drift_dashboard(self):
# To generate the **Data Drift** and the **Categorical Target Drift** reports, run:
iris_data_and_target_drift_report = Dashboard(tabs=[DataDriftTab(), CatTargetDriftTab()])
iris_data_and_target_drift_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
actual = json.loads(iris_data_and_target_drift_report._json())
self.assertTrue('name' in actual)
self.assertTrue(len(actual['widgets']) == 3)
def test_regression_performance_dashboard(self):
# To generate the **Regression Model Performance** report, run:
# FIXME: when prediction column is not present in the dataset
# ValueError: [Widget Regression Model Performance Report.] self.wi is None,
# no data available (forget to set it in widget?)
self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
regression_model_performance = Dashboard(tabs=[RegressionPerformanceTab()])
regression_model_performance.calculate(self.iris_frame[:100], self.iris_frame[100:])
actual = json.loads(regression_model_performance._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 20)
def test_regression_performance_single_frame_dashboard(self):
# You can also generate a **Regression Model Performance** for a single `DataFrame`. In this case, run:
# FIXME: when prediction column is not present in the dataset
# ValueError: [Widget Regression Model Performance Report.] self.wi is None,
# no data available (forget to set it in widget?)
self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
regression_single_model_performance = Dashboard(tabs=[RegressionPerformanceTab()])
regression_single_model_performance.calculate(self.iris_frame, None)
actual = json.loads(regression_single_model_performance._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 12)
def test_classification_performance_dashboard(self):
# To generate the **Classification Model Performance** report, run:
# FIXME: when prediction column is not present in the dataset
# ValueError: [Widget Classification Model Performance Report.] self.wi is None,
# no data available (forget to set it in widget?)
self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
classification_performance_report = Dashboard(tabs=[ClassificationPerformanceTab()])
classification_performance_report.calculate(self.iris_frame[:100], self.iris_frame[100:])
actual = json.loads(classification_performance_report._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 10)
def test_probabilistic_classification_performance_dashboard(self):
# For **Probabilistic Classification Model Performance** report, run:
random_probs = np.random.random((3, 150))
random_probs = (random_probs / random_probs.sum(0))
pred_df = pd.DataFrame(random_probs.T, columns=self.iris_targets)
iris_frame = pd.concat([self.iris_frame, pred_df], axis=1)
iris_frame['target'] = self.iris_targets[self.iris_frame['target']]
iris_column_mapping = ColumnMapping()
iris_column_mapping.prediction = self.iris_targets
classification_performance_report = Dashboard(tabs=[ProbClassificationPerformanceTab()])
classification_performance_report.calculate(iris_frame, iris_frame, iris_column_mapping)
actual = json.loads(classification_performance_report._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 20)
def test_classification_performance_on_single_frame_dashboard(self):
# You can also generate either of the **Classification** reports for a single `DataFrame`. In this case, run:
self.iris_frame['prediction'] = self.iris_frame['target'][::-1]
classification_single_frame_performance = Dashboard(tabs=[ClassificationPerformanceTab()])
classification_single_frame_performance.calculate(self.iris_frame, None)
actual = json.loads(classification_single_frame_performance._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 9)
def test_probabilistic_classification_performance_on_single_frame_dashboard(self):
# You can also generate either of the **Classification** reports for a single `DataFrame`. In this case, run:
# FIXME: like above, when prediction column is not present in the dataset
random_probs = np.random.random((3, 150))
random_probs = (random_probs / random_probs.sum(0))
pred_df = pd.DataFrame(random_probs.T, columns=self.iris_targets)
iris_frame = pd.concat([self.iris_frame, pred_df], axis=1)
iris_frame['target'] = self.iris_targets[self.iris_frame['target']]
iris_column_mapping = ColumnMapping()
iris_column_mapping.prediction = self.iris_targets
prob_classification_single_frame_performance = Dashboard(tabs=[ProbClassificationPerformanceTab()])
prob_classification_single_frame_performance.calculate(iris_frame, None, iris_column_mapping)
actual = json.loads(prob_classification_single_frame_performance._json())
self.assertTrue('name' in actual)
self.assertEqual(len(actual['widgets']), 11)
class TestProfiles(TestCase):
###
# The following are extracted from the README.md file.
###
def test_data_drift_profile(self):
# To generate the **Data Drift** report, run:
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
actual = json.loads(iris_data_drift_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertEqual(len(actual), 2)
self.assertEqual(len(actual['data_drift']['data']), 6)
self.assertTrue('metrics' in actual['data_drift']['data'])
def test_data_drift_categorical_target_drift_profile(self):
# To generate the **Data Drift** and the **Categorical Target Drift** reports, run:
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
iris_target_and_data_drift_profile = Profile(
sections=[DataDriftProfileSection(), CatTargetDriftProfileSection()])
iris_target_and_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
actual = json.loads(iris_target_and_data_drift_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertTrue(len(actual) == 3)
self.assertEqual(len(actual['data_drift']['data']), 6)
self.assertEqual(len(actual['cat_target_drift']['data']), 5)
self.assertTrue(actual['data_drift']['data'].get('metrics'))
def test_regression_performance_profile(self):
# To generate the **Regression Model Performance** report, run:
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
regression_single_model_performance = Profile(sections=[RegressionPerformanceProfileSection()])
regression_single_model_performance.calculate(iris_frame, None)
actual = json.loads(regression_single_model_performance.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertTrue(len(actual) == 2)
self.assertTrue(len(actual['regression_performance']['data']) == 5)
self.assertTrue(actual['regression_performance']['data'].get('metrics'))
def test_regression_performance_single_frame_profile(self):
# You can also generate a **Regression Model Performance** for a single `DataFrame`. In this case, run:
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
regression_single_model_performance = Profile(sections=[RegressionPerformanceProfileSection()])
regression_single_model_performance.calculate(iris_frame, None)
actual = json.loads(regression_single_model_performance.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertTrue(len(actual) == 2)
self.assertTrue(len(actual['regression_performance']['data']) == 5)
self.assertTrue(actual['regression_performance']['data'].get('metrics'))
def test_classification_performance_profile(self):
# To generate the **Classification Model Performance** report, run:
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
classification_performance_profile = Profile(sections=[ClassificationPerformanceProfileSection()])
classification_performance_profile.calculate(iris_frame[:100], iris_frame[100:])
actual = json.loads(classification_performance_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertTrue(len(actual) == 2)
self.assertTrue(len(actual['classification_performance']['data']) == 5)
self.assertTrue(actual['classification_performance']['data'].get('metrics'))
def test_classification_performance_single_profile(self):
iris, iris_frame = _get_iris()
iris_frame['prediction'] = iris.target[::-1]
iris_data_drift_profile = Profile(sections=[DataDriftProfileSection()])
iris_data_drift_profile.calculate(iris_frame[:100], iris_frame[100:])
classification_performance_profile = Profile(sections=[ClassificationPerformanceProfileSection()])
classification_performance_profile.calculate(iris_frame[:100], None)
actual = json.loads(classification_performance_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertTrue(len(actual) == 2)
self.assertTrue(len(actual['classification_performance']['data']) == 5)
self.assertTrue(actual['classification_performance']['data'].get('metrics'))
def test_probabilistic_classification_performance_profile(self):
# For **Probabilistic Classification Model Performance** report, run:
merged_reference, column_mapping = _get_probabilistic_iris()
iris_prob_classification_profile = Profile(sections=[ProbClassificationPerformanceProfileSection()])
iris_prob_classification_profile.calculate(merged_reference, merged_reference, column_mapping)
# FIXME: this does not work! why?
# iris_prob_classification_profile.calculate(merged_reference[:100], merged_reference[100:],
# column_mapping = iris_column_mapping)
actual = json.loads(iris_prob_classification_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertEqual(len(actual), 2)
self.assertEqual(len(actual['probabilistic_classification_performance']['data']), 5)
self.assertEqual(len(actual['probabilistic_classification_performance']['data']['metrics']), 2)
self.assertTrue('reference' in actual['probabilistic_classification_performance']['data']['metrics'])
self.assertTrue('current' in actual['probabilistic_classification_performance']['data']['metrics'])
def test_probabilistic_classification_single_performance_profile(self):
# For **Probabilistic Classification Model Performance** report, run:
merged_reference, column_mapping = _get_probabilistic_iris()
iris_prob_classification_profile = Profile(sections=[ProbClassificationPerformanceProfileSection()])
iris_prob_classification_profile.calculate(merged_reference, None, column_mapping)
# FIXME: this does not work! why?
# iris_prob_classification_profile.calculate(merged_reference[:100], None,
# column_mapping = iris_column_mapping)
actual = json.loads(iris_prob_classification_profile.json())
# we leave the actual content test to other tests for widgets
self.assertTrue('timestamp' in actual)
self.assertEqual(len(actual), 2)
self.assertEqual(len(actual['probabilistic_classification_performance']['data']), 5)
self.assertEqual(len(actual['probabilistic_classification_performance']['data']['metrics']), 1)
self.assertTrue('reference' in actual['probabilistic_classification_performance']['data']['metrics'])
| true | true |
f7fc7ac1722de66b9faa483bb47f087e4fa66fa3 | 1,819 | py | Python | robot_sidelights.py | kholm777/maqueen | 4bfec1e28d6e51991a8839fade38f64190ff56eb | [
"MIT"
] | null | null | null | robot_sidelights.py | kholm777/maqueen | 4bfec1e28d6e51991a8839fade38f64190ff56eb | [
"MIT"
] | null | null | null | robot_sidelights.py | kholm777/maqueen | 4bfec1e28d6e51991a8839fade38f64190ff56eb | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2021 Kristoffer Holm
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Demo script for the DFRobot Maqueen's four RGB side lights (micro:bit).
from maqueen import Maqueen
from microbit import *
import utime
import random

robot = Maqueen()

# Give the board a moment to settle before driving the LEDs.
utime.sleep_ms(1000)

# Effect 1: ramp all four side lights from off up to full-brightness blue.
for x in range(256):
    robot.rgb_front_left(0,0,x)
    robot.rgb_rear_left(0,0,x)
    robot.rgb_front_right(0,0,x)
    robot.rgb_rear_right(0,0,x)

# Hold the final blue for a second before the next effect.
utime.sleep_ms(1000)

# Effect 2: 100 random colours, one every 100 ms, on all four lights.
for x in range(100):
    red = random.randint(0,255)
    green = random.randint(0,255)
    blue = random.randint(0,255)
    robot.rgb_front_left(red,green,blue)
    robot.rgb_rear_left(red,green,blue)
    robot.rgb_front_right(red,green,blue)
    robot.rgb_rear_right(red,green,blue)
    utime.sleep_ms(100)

# Finally switch the lights back off (rear right is cleared on the next line).
robot.rgb_front_left(0,0,0)
robot.rgb_rear_left(0,0,0)
robot.rgb_front_right(0,0,0)
robot.rgb_rear_right(0,0,0) | 34.320755 | 79 | 0.759758 |
from maqueen import Maqueen
from microbit import *
import utime
import random
robot = Maqueen()
utime.sleep_ms(1000)
for x in range(256):
robot.rgb_front_left(0,0,x)
robot.rgb_rear_left(0,0,x)
robot.rgb_front_right(0,0,x)
robot.rgb_rear_right(0,0,x)
utime.sleep_ms(1000)
for x in range(100):
red = random.randint(0,255)
green = random.randint(0,255)
blue = random.randint(0,255)
robot.rgb_front_left(red,green,blue)
robot.rgb_rear_left(red,green,blue)
robot.rgb_front_right(red,green,blue)
robot.rgb_rear_right(red,green,blue)
utime.sleep_ms(100)
robot.rgb_front_left(0,0,0)
robot.rgb_rear_left(0,0,0)
robot.rgb_front_right(0,0,0)
robot.rgb_rear_right(0,0,0) | true | true |
f7fc7b298fa9c9514eb7101c3b6efe74205fc692 | 4,327 | py | Python | contrib/seeds/generate-seeds.py | thnass/nakedcash | 08b2c52cd0a846d52701967acf20f5f4d7773d94 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | thnass/nakedcash | 08b2c52cd0a846d52701967acf20f5f4d7773d94 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | thnass/nakedcash | 08b2c52cd0a846d52701967acf20f5f4d7773d94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 7291)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 29116)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.355072 | 98 | 0.581465 |
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 7291)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 29116)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true | true |
f7fc7b6039d08d325b6e4d393fb00d85bc6cc112 | 13,833 | py | Python | projects/TGS_salt/training/train1_all_Unet_scSE_hyper.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 280 | 2018-10-21T01:07:18.000Z | 2021-12-30T11:29:48.000Z | projects/TGS_salt/training/train1_all_Unet_scSE_hyper.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 3 | 2018-11-13T08:04:48.000Z | 2020-04-17T09:20:03.000Z | projects/TGS_salt/training/train1_all_Unet_scSE_hyper.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 59 | 2018-10-21T04:38:23.000Z | 2021-03-29T07:58:47.000Z | import os
import sys
sys.path.append('../../')
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
SIZE = 128
FACTOR = 128
FOLD = 1
ne = ""
initial_checkpoint = None#'/home/liaop20/data/salt/checkpoints/list_train'+str(FOLD)+'_3600/'#None#'/home/liaop20/data/salt/checkpoints/list_train6_3600_ne_balanced/ResNet34_25600151000_model.pth'#None
MODEL = "ResNet34_"
OHEM = "all_128"
PAD = 0
Y0, Y1, X0, X1 = PAD,PAD+SIZE,PAD,PAD+SIZE,
def time_to_str(time, str):
#if str == 'min':
# return str(round(float(time)/60,5))+" min(s)"
return round(time,4)
#TODO: Instead of directly printing to stdout, copy it into a txt file
class Logger():
def __init__(self,name=MODEL+OHEM+ne, fold=FOLD):
super().__init__()
self.fold=str(fold)
self.model=name
#if OHEM != "OHEM":
# self.model=MODEL+ne[ne.find("_")+1:]
self.file = open(self.fold+self.model+"_log.txt","w+")
self.file.close()
def write(self, str):
print(str)
self.file = open(self.fold+self.model+"_log.txt","a+")
self.file.write(str)
self.file.close()
def write2(self, str):
print(str, end='',flush=True)
self.file = open(self.fold+self.model+"_log.txt","a+")
self.file.write(str)
self.file.close()
def stop():
self.file.close()
def valid_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask, factor = FACTOR)
return image,mask,index,cache
def train_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
if np.random.rand() < 0.5:
image, mask = do_horizontal_flip2(image, mask)
pass
if np.random.rand() < 0.5:
c = np.random.choice(4)
if c==0:
image, mask = do_random_shift_scale_crop_pad2(image, mask, 0.2) #0.125
if c==1:
image, mask = do_horizontal_shear2( image, mask, dx=np.random.uniform(-0.07,0.07) )
pass
if c==2:
image, mask = do_shift_scale_rotate2( image, mask, dx=0, dy=0, scale=1, angle=np.random.uniform(0,15)) #10
if c==3:
image, mask = do_elastic_transform2(image, mask, grid=10, distort=np.random.uniform(0,0.15))#0.10
pass
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c==0:
image = do_brightness_shift(image,np.random.uniform(-0.1,+0.1))
if c==1:
image = do_brightness_multiply(image,np.random.uniform(1-0.08,1+0.08))
if c==2:
image = do_gamma(image,np.random.uniform(1-0.08,1+0.08))
# if c==1:
# image = do_invert_intensity(image)
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask, factor = FACTOR)
return image,mask,index,cache
def validation( net, valid_loader ):
valid_num = 0
valid_loss = np.zeros(3,np.float32)
predicts = []
truths = []
for input, truth, index, cache in valid_loader:
input = input.cuda()
truth = truth.cuda()
with torch.no_grad():
logit = data_parallel(net,input) #net(input)
prob = F.sigmoid(logit)
loss = net.focal_loss(logit, truth, 1.0, 0.5, 0.25) + net.criterion(logit, truth)
dice = net.metric(logit, truth)
batch_size = len(index)
valid_loss += batch_size*np.array(( loss.item(), dice.item(), 0))
valid_num += batch_size
prob = prob [:,:,Y0:Y1, X0:X1]
truth = truth[:,:,Y0:Y1, X0:X1]
#prob = F.avg_pool2d(prob, kernel_size=2, stride=2)
#truth = F.avg_pool2d(truth, kernel_size=2, stride=2)
predicts.append(prob.data.cpu().numpy())
truths.append(truth.data.cpu().numpy())
assert(valid_num == len(valid_loader.sampler))
valid_loss = valid_loss/valid_num
#--------------------------------------------------------
predicts = np.concatenate(predicts).squeeze()
truths = np.concatenate(truths).squeeze()
precision, result, threshold = do_kaggle_metric(predicts, truths)
valid_loss[2] = precision.mean()
return valid_loss
def train(initial_checkpoint):
## setup -----------------
os.makedirs(CHECKPOINTS +'/checkpoint', exist_ok=True)
os.makedirs(CHECKPOINTS +'/train', exist_ok=True)
os.makedirs(CHECKPOINTS +'/backup', exist_ok=True)
log = Logger()
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % CODE)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tRESULT = %s\n' % CHECKPOINTS)
log.write('\n')
log.write('\t<additional comments>\n')
log.write('\t ... \n')
log.write('\n')
## dataset ----------------------------------------
log.write('Configuring dataset...\n')
batch_size = 16
train_dataset = TGSDataset('list_train'+str(FOLD)+'_3600'+ne, train_augment, 'train')
os.makedirs(CHECKPOINTS +'/list_train'+str(FOLD)+'_3600'+ne, exist_ok=True)
train_loader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
#sampler = ConstantSampler(train_dataset,[31]*batch_size*100),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
valid_dataset = TGSDataset('list_valid'+str(FOLD)+'_400'+ne, valid_augment, 'train')
valid_loader = DataLoader(
valid_dataset,
sampler = RandomSampler(valid_dataset),
batch_size = batch_size,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset.split = %s\n'%(train_dataset.split))
log.write('valid_dataset.split = %s\n'%(valid_dataset.split))
log.write('\n')
#debug
if 0: #debug ##-------------------------------
for input, truth, index, cache in train_loader:
images = input.cpu().data.numpy().squeeze()
masks = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image = images[b]*255
image = np.dstack([image,image,image])
mask = masks[b]
image_show('image',image,resize=2)
image_show_norm('mask', mask, max=1,resize=2)
overlay0 = draw_mask_overlay(mask, image, color=[0,0,255])
overlay0 = draw_mask_to_contour_overlay(mask, overlay0, 2, color=[0,0,255])
image_show('overlay0',overlay0,resize=2)
cv2.waitKey(0)
#--------------------------------------
## net ----------------------------------------
log.write('Configuring neural network...\n')
net = Net().cuda()
if initial_checkpoint is not None:
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
log.write("The net is an instance of {}.".format(type(net)))
log.write('\n')
## optimiser ----------------------------------
num_iters = 300 *1000
iter_smooth = 20
iter_log = 50
iter_valid = 100
iter_save = [0, num_iters-1]\
+ list(range(0,num_iters,500))#1*1000
FREEZE=False
#------------------------------------------------------
if FREEZE: ##freeze
for p in net.feature_net.parameters():
p.requires_grad = False
#------------------------------------------------------
scheduler = lambda x: (0.01/2)*(np.cos(PI*(np.mod(x-1,300*1000/30)/(300*1000/30)))+1)
#log.write(scheduler(1))
#log.write(scheduler(5000))
#log.write(scheduler(10001))
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=0.01, momentum=0.9, weight_decay=0.0001)
start_iter = 0
start_epoch= 0
if initial_checkpoint is not None:
checkpoint = torch.load(initial_checkpoint.replace('_model.pth','_optimizer.pth'))
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
rate = get_learning_rate(optimizer) #load all except learning rate
optimizer.load_state_dict(checkpoint['optimizer'])
adjust_learning_rate(optimizer, rate)
pass
## start training here! ##############################################
log.write('Start training...\n')
log.write(' rate iter epoch | valid_loss | train_loss | batch_loss | time \n')
log.write('-------------------------------------------------------------------------------------------------------------------------------\n')
train_loss = np.zeros(6,np.float32)
valid_loss = np.zeros(6,np.float32)
batch_loss = np.zeros(6,np.float32)
rate = 0
iter = 0
i = 0
start = timer()
while iter<num_iters: # loop over the dataset multiple times
sum_train_loss = np.zeros(6,np.float32)
sum = 0
optimizer.zero_grad()
for input, truth, index, cache in train_loader:
if 0: #debug ##-------------------------------
image = input.cpu().data.numpy().squeeze()
mask = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image_show_norm('image',image[b],max=1,resize=2)
image_show_norm('mask', mask[b], max=1,resize=2)
cv2.waitKey(0)
#--------------------------------------
len_train_dataset = len(train_dataset)
batch_size = len(index)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len_train_dataset + start_epoch
num_samples = epoch*len_train_dataset
if iter % iter_valid==0:
net.set_mode('valid')
valid_loss = validation(net, valid_loader)
net.set_mode('train')
log.write2('\r')
log.write('%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s \n' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start),'min')))
time.sleep(0.01)
if iter in iter_save:
torch.save(net.state_dict(),CHECKPOINTS+"/"+train_dataset.split+'/'+MODEL+OHEM+'%08d_model.pth'%(iter))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, CHECKPOINTS+"/"+train_dataset.split+'/'+MODEL+OHEM+'%08d_optimizer.pth'%(iter))
pass
# learning rate schduler -------------
if scheduler is not None:
#scheduler.batch_step()
lr = scheduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
#rate = 0.01
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.set_mode('train')
input = input.cuda()
truth = truth.cuda()
logit = data_parallel(net,input) #net(input)
if OHEM == "OHEM":
loss = net.focal_loss(logit, truth, 1.0, 0.5, 0.25) + net.criterion(logit, truth)
else:
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
loss.backward()
optimizer.step()
optimizer.zero_grad()
#torch.nn.utils.clip_grad_norm(net.parameters(), 1)
# print statistics ------------
batch_loss = np.array((
loss.item(),
dice.item(),
0, 0, 0, 0,
))
sum_train_loss += batch_loss
sum += 1
if iter%iter_smooth == 0:
train_loss = sum_train_loss/sum
sum_train_loss = np.zeros(6,np.float32)
sum = 0
log.write2('\r%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s ' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start), 'min')))
i=i+1
pass #-- end of one data loader --
pass #-- end of all iterations --
log.write('\n')
if __name__ == '__main__':
print("Training U-Net with hypercolumn concatenation and spatial/channel-wise excitation...")
train(initial_checkpoint)
print('\tFinished!')
| 35.836788 | 201 | 0.52382 | import os
import sys
sys.path.append('../../')
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
SIZE = 128
FACTOR = 128
FOLD = 1
ne = ""
initial_checkpoint = Noneold=FOLD):
super().__init__()
self.fold=str(fold)
self.model=name
self.file = open(self.fold+self.model+"_log.txt","w+")
self.file.close()
def write(self, str):
print(str)
self.file = open(self.fold+self.model+"_log.txt","a+")
self.file.write(str)
self.file.close()
def write2(self, str):
print(str, end='',flush=True)
self.file = open(self.fold+self.model+"_log.txt","a+")
self.file.write(str)
self.file.close()
def stop():
self.file.close()
def valid_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask, factor = FACTOR)
return image,mask,index,cache
def train_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
if np.random.rand() < 0.5:
image, mask = do_horizontal_flip2(image, mask)
pass
if np.random.rand() < 0.5:
c = np.random.choice(4)
if c==0:
image, mask = do_random_shift_scale_crop_pad2(image, mask, 0.2)
if c==1:
image, mask = do_horizontal_shear2( image, mask, dx=np.random.uniform(-0.07,0.07) )
pass
if c==2:
image, mask = do_shift_scale_rotate2( image, mask, dx=0, dy=0, scale=1, angle=np.random.uniform(0,15))
if c==3:
image, mask = do_elastic_transform2(image, mask, grid=10, distort=np.random.uniform(0,0.15))
pass
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c==0:
image = do_brightness_shift(image,np.random.uniform(-0.1,+0.1))
if c==1:
image = do_brightness_multiply(image,np.random.uniform(1-0.08,1+0.08))
if c==2:
image = do_gamma(image,np.random.uniform(1-0.08,1+0.08))
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask, factor = FACTOR)
return image,mask,index,cache
def validation( net, valid_loader ):
valid_num = 0
valid_loss = np.zeros(3,np.float32)
predicts = []
truths = []
for input, truth, index, cache in valid_loader:
input = input.cuda()
truth = truth.cuda()
with torch.no_grad():
logit = data_parallel(net,input)
prob = F.sigmoid(logit)
loss = net.focal_loss(logit, truth, 1.0, 0.5, 0.25) + net.criterion(logit, truth)
dice = net.metric(logit, truth)
batch_size = len(index)
valid_loss += batch_size*np.array(( loss.item(), dice.item(), 0))
valid_num += batch_size
prob = prob [:,:,Y0:Y1, X0:X1]
truth = truth[:,:,Y0:Y1, X0:X1]
predicts.append(prob.data.cpu().numpy())
truths.append(truth.data.cpu().numpy())
assert(valid_num == len(valid_loader.sampler))
valid_loss = valid_loss/valid_num
predicts = np.concatenate(predicts).squeeze()
truths = np.concatenate(truths).squeeze()
precision, result, threshold = do_kaggle_metric(predicts, truths)
valid_loss[2] = precision.mean()
return valid_loss
def train(initial_checkpoint):
TS +'/checkpoint', exist_ok=True)
os.makedirs(CHECKPOINTS +'/train', exist_ok=True)
os.makedirs(CHECKPOINTS +'/backup', exist_ok=True)
log = Logger()
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % CODE)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tRESULT = %s\n' % CHECKPOINTS)
log.write('\n')
log.write('\t<additional comments>\n')
log.write('\t ... \n')
log.write('\n')
ch_size = 16
train_dataset = TGSDataset('list_train'+str(FOLD)+'_3600'+ne, train_augment, 'train')
os.makedirs(CHECKPOINTS +'/list_train'+str(FOLD)+'_3600'+ne, exist_ok=True)
train_loader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
valid_dataset = TGSDataset('list_valid'+str(FOLD)+'_400'+ne, valid_augment, 'train')
valid_loader = DataLoader(
valid_dataset,
sampler = RandomSampler(valid_dataset),
batch_size = batch_size,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset.split = %s\n'%(train_dataset.split))
log.write('valid_dataset.split = %s\n'%(valid_dataset.split))
log.write('\n')
if 0: images = input.cpu().data.numpy().squeeze()
masks = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image = images[b]*255
image = np.dstack([image,image,image])
mask = masks[b]
image_show('image',image,resize=2)
image_show_norm('mask', mask, max=1,resize=2)
overlay0 = draw_mask_overlay(mask, image, color=[0,0,255])
overlay0 = draw_mask_to_contour_overlay(mask, overlay0, 2, color=[0,0,255])
image_show('overlay0',overlay0,resize=2)
cv2.waitKey(0)
n')
net = Net().cuda()
if initial_checkpoint is not None:
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
log.write("The net is an instance of {}.".format(type(net)))
log.write('\n')
= 20
iter_log = 50
iter_valid = 100
iter_save = [0, num_iters-1]\
+ list(range(0,num_iters,500))
FREEZE=False
if FREEZE: for p in net.feature_net.parameters():
p.requires_grad = False
scheduler = lambda x: (0.01/2)*(np.cos(PI*(np.mod(x-1,300*1000/30)/(300*1000/30)))+1)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=0.01, momentum=0.9, weight_decay=0.0001)
start_iter = 0
start_epoch= 0
if initial_checkpoint is not None:
checkpoint = torch.load(initial_checkpoint.replace('_model.pth','_optimizer.pth'))
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
rate = get_learning_rate(optimizer)
optimizer.load_state_dict(checkpoint['optimizer'])
adjust_learning_rate(optimizer, rate)
pass
tart_iter
epoch = (iter-start_iter)*batch_size/len_train_dataset + start_epoch
num_samples = epoch*len_train_dataset
if iter % iter_valid==0:
net.set_mode('valid')
valid_loss = validation(net, valid_loader)
net.set_mode('train')
log.write2('\r')
log.write('%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s \n' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start),'min')))
time.sleep(0.01)
if iter in iter_save:
torch.save(net.state_dict(),CHECKPOINTS+"/"+train_dataset.split+'/'+MODEL+OHEM+'%08d_model.pth'%(iter))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, CHECKPOINTS+"/"+train_dataset.split+'/'+MODEL+OHEM+'%08d_optimizer.pth'%(iter))
pass
if scheduler is not None:
lr = scheduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
net.set_mode('train')
input = input.cuda()
truth = truth.cuda()
logit = data_parallel(net,input)
if OHEM == "OHEM":
loss = net.focal_loss(logit, truth, 1.0, 0.5, 0.25) + net.criterion(logit, truth)
else:
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
loss.backward()
optimizer.step()
optimizer.zero_grad()
batch_loss = np.array((
loss.item(),
dice.item(),
0, 0, 0, 0,
))
sum_train_loss += batch_loss
sum += 1
if iter%iter_smooth == 0:
train_loss = sum_train_loss/sum
sum_train_loss = np.zeros(6,np.float32)
sum = 0
log.write2('\r%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s ' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start), 'min')))
i=i+1
pass
pass
log.write('\n')
if __name__ == '__main__':
print("Training U-Net with hypercolumn concatenation and spatial/channel-wise excitation...")
train(initial_checkpoint)
print('\tFinished!')
| true | true |
f7fc7bcf72b628a4ccce95214c12abee5e811d3e | 665 | py | Python | migrations/versions/07f6e404201c_.py | tomasfarias/fariasweb | ec908ef8068b0019cb04ab1c96f738c11224fa03 | [
"MIT"
] | 2 | 2019-07-25T02:25:18.000Z | 2019-08-30T04:00:52.000Z | migrations/versions/07f6e404201c_.py | tomasfarias/fariasweb | ec908ef8068b0019cb04ab1c96f738c11224fa03 | [
"MIT"
] | 1 | 2019-08-29T02:12:51.000Z | 2019-08-29T02:12:51.000Z | migrations/versions/07f6e404201c_.py | tomasfarias/fariasweb | ec908ef8068b0019cb04ab1c96f738c11224fa03 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 07f6e404201c
Revises: 90c05db34e87
Create Date: 2019-04-02 14:16:58.606184
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '07f6e404201c'
down_revision = '90c05db34e87'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_post_url'), 'post', ['url'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_url'), table_name='post')
# ### end Alembic commands ###
| 22.931034 | 71 | 0.688722 | from alembic import op
import sqlalchemy as sa
revision = '07f6e404201c'
down_revision = '90c05db34e87'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f7fc7beea028640e622f981e3f2fe9aa8129f1fa | 5,719 | py | Python | intersight/models/storage_physical_port_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | intersight/models/storage_physical_port_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | intersight/models/storage_physical_port_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StoragePhysicalPortRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
StoragePhysicalPortRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this StoragePhysicalPortRef.
The Object Type of the referenced REST resource.
:return: The object_type of this StoragePhysicalPortRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this StoragePhysicalPortRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this StoragePhysicalPortRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this StoragePhysicalPortRef.
The Moid of the referenced REST resource.
:return: The moid of this StoragePhysicalPortRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this StoragePhysicalPortRef.
The Moid of the referenced REST resource.
:param moid: The moid of this StoragePhysicalPortRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this StoragePhysicalPortRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this StoragePhysicalPortRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this StoragePhysicalPortRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this StoragePhysicalPortRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StoragePhysicalPortRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 31.423077 | 576 | 0.606749 |
from pprint import pformat
from six import iteritems
import re
class StoragePhysicalPortRef(object):
    """Reference (MoRef) to a ``storage.PhysicalPort`` REST resource.

    The referenced resource is identified either directly by ``moid`` or
    indirectly by an OData ``$filter`` ``selector`` expression; per the API
    contract, ``moid`` takes precedence when both are set.

    NOTE: swagger-generated model; keep attribute names in sync with
    ``swagger_types`` / ``attribute_map``. ``__eq__`` is defined without
    ``__hash__``, so instances are unhashable (as generated).
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'object_type': 'str',
        'moid': 'str',
        'selector': 'str'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'object_type': 'ObjectType',
        'moid': 'Moid',
        'selector': 'Selector'
    }

    def __init__(self, object_type=None, moid=None, selector=None):
        """Build a reference; every field is optional and defaults to None."""
        self._object_type = None
        self._moid = None
        self._selector = None
        if object_type is not None:
            self.object_type = object_type
        if moid is not None:
            self.moid = moid
        if selector is not None:
            self.selector = selector

    @property
    def object_type(self):
        """str: Concrete type of the referenced resource."""
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        self._object_type = object_type

    @property
    def moid(self):
        """str: Moid (managed object id) of the referenced resource."""
        return self._moid

    @moid.setter
    def moid(self, moid):
        self._moid = moid

    @property
    def selector(self):
        """str: OData $filter expression selecting the referenced resource."""
        return self._selector

    @selector.setter
    def selector(self, selector):
        self._selector = selector

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        # Py2 is EOL: plain dict.items() replaces the six.iteritems dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        import pprint  # local import keeps the class free of module-level helpers
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        if not isinstance(other, StoragePhysicalPortRef):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| true | true |
f7fc7c7780d7a6562770da748b0838e0f4cae4ca | 5,403 | py | Python | ecommerce/migrations/0001_initial.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | [
"BSD-3-Clause"
] | 2 | 2018-06-20T19:37:03.000Z | 2021-01-06T09:51:40.000Z | ecommerce/migrations/0001_initial.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 1,226 | 2017-02-23T14:52:28.000Z | 2022-03-29T13:19:54.000Z | ecommerce/migrations/0001_initial.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | [
"BSD-3-Clause"
] | 3 | 2017-03-20T03:51:27.000Z | 2021-03-19T15:54:31.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-22 17:58
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial ecommerce schema: Line, Order, OrderAudit and Receipt."""
    # First migration of the app.
    initial = True
    # Only external dependency is the (swappable) user model.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    operations = [
        # Line: one priced item. Its FK to Order is attached by the
        # AddField operation at the end of this migration.
        migrations.CreateModel(
            name="Line",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                ("updated_on", models.DateTimeField(auto_now=True)),
                # Soft reference stored as text rather than a FK.
                # NOTE(review): presumably the id of a bootcamp class -- confirm.
                ("klasse_id", models.TextField()),
                ("price", models.DecimalField(decimal_places=2, max_digits=20)),
                ("description", models.TextField()),
            ],
            options={"abstract": False},
        ),
        # Order: a purchase by a user, with a lifecycle status.
        migrations.CreateModel(
            name="Order",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                ("updated_on", models.DateTimeField(auto_now=True)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("created", "created"),
                            ("fulfilled", "fulfilled"),
                            ("failed", "failed"),
                            ("refunded", "refunded"),
                        ],
                        default="created",
                        max_length=30,
                    ),
                ),
                (
                    "total_price_paid",
                    models.DecimalField(decimal_places=2, max_digits=20),
                ),
                # Orders are deleted together with their owning user.
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # OrderAudit: before/after JSON snapshots of an order. Both FKs use
        # SET_NULL so audit rows outlive the order and the acting user.
        migrations.CreateModel(
            name="OrderAudit",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                ("updated_on", models.DateTimeField(auto_now=True)),
                (
                    "data_before",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True, null=True
                    ),
                ),
                (
                    "data_after",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True, null=True
                    ),
                ),
                (
                    "acting_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "order",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="ecommerce.Order",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Receipt: JSONB payload tied to an order, deleted with its order.
        # NOTE(review): presumably the raw payment-processor response -- confirm.
        migrations.CreateModel(
            name="Receipt",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                ("updated_on", models.DateTimeField(auto_now=True)),
                ("data", django.contrib.postgres.fields.jsonb.JSONField()),
                (
                    "order",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="ecommerce.Order",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Line.order is added last: Order is created after Line above, so the
        # FK can only be attached once both models exist.
        migrations.AddField(
            model_name="line",
            name="order",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="ecommerce.Order"
            ),
        ),
    ]
| 34.196203 | 81 | 0.393855 |
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Line",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("klasse_id", models.TextField()),
("price", models.DecimalField(decimal_places=2, max_digits=20)),
("description", models.TextField()),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Order",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
(
"status",
models.CharField(
choices=[
("created", "created"),
("fulfilled", "fulfilled"),
("failed", "failed"),
("refunded", "refunded"),
],
default="created",
max_length=30,
),
),
(
"total_price_paid",
models.DecimalField(decimal_places=2, max_digits=20),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="OrderAudit",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
(
"data_before",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"data_after",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"acting_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"order",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="ecommerce.Order",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Receipt",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("data", django.contrib.postgres.fields.jsonb.JSONField()),
(
"order",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="ecommerce.Order",
),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="line",
name="order",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="ecommerce.Order"
),
),
]
| true | true |
f7fc7d70e0d022f438642dceac16636cba308c45 | 4,203 | py | Python | tasks.py | EuroPython/ep-tools | 3b7efce56a11625a5560313880aa6737a5afc16f | [
"MIT"
] | 3 | 2018-08-02T19:17:14.000Z | 2021-08-03T10:19:16.000Z | tasks.py | EuroPython/ep-tools | 3b7efce56a11625a5560313880aa6737a5afc16f | [
"MIT"
] | 5 | 2016-03-08T14:38:43.000Z | 2017-05-07T10:11:55.000Z | tasks.py | EuroPython/ep-tools | 3b7efce56a11625a5560313880aa6737a5afc16f | [
"MIT"
] | 4 | 2016-02-15T21:15:50.000Z | 2018-07-10T15:38:35.000Z | """
Invoke tasks to be run from the command line.
"""
import os
from invoke import task
from eptools import talks, people
from eptools.gspread_utils import get_api_key_file
from eptools.config import (
conference,
sponsors_billing_worksheet,
finaid_submissions_worksheet
)
@task
def sponsor_agreement(ctx, company_name, output_dir, template_file="", api_key_file=""):
    """ Call docstamp to produce a sponsor agreement for `company_name`
    using `template_file`. The output will be saved in `output_dir`.

    Parameters
    ----------
    company_name: str
        Can be a substring of the company name in the spreadsheet.

    output_dir: str
        Folder where the rendered agreement is written.

    template_file: str
        Path to the contract template; defaults to
        `eptools.sponsors.contract_template` when empty.

    api_key_file: str
        The path to the Google Credentials json file.
        If left empty will try to look for its path in the config.py file.

    Raises
    ------
    KeyError
        If no sponsor row matches `company_name`; the original lookup
        error is attached as the cause.
    """
    # Project imports are kept function-local, as elsewhere in this module.
    from eptools.sponsors import (
        get_sponsor,
        get_sponsors_ws_data,
        create_sponsor_agreement,
        contract_template,
        company_name_column,
    )

    if not template_file:
        template_file = contract_template

    if not api_key_file:
        api_key_file = get_api_key_file()

    output_dir = os.path.abspath(output_dir)
    responses = get_sponsors_ws_data(api_key_file=api_key_file, doc_key=sponsors_billing_worksheet[0])

    try:
        sponsor_data = get_sponsor(sponsor_name=company_name, sponsors=responses, col_name=company_name_column)
    except Exception as exc:
        # Chain the original error so the root cause is not hidden (PEP 3134).
        raise KeyError("Could not find data for sponsor {}.".format(company_name)) from exc
    else:
        fpath = create_sponsor_agreement(sponsor_data, template_file=template_file, output_dir=output_dir)
        print("Created {}.".format(fpath))
@task
def finaid_receipt(ctx, applicant_name, output_dir, template_file="", api_key_file=""):
    """ Call docstamp to produce a financial aid receipt
    for `applicant_name` using `template_file`.
    The output will be saved in `output_dir`.

    Parameters
    ----------
    applicant_name: str
        Matched against the 'full_name' column of the submissions sheet.

    output_dir: str
        Folder where the rendered receipt is written.

    template_file: str
        Path to the receipt template; defaults to
        `eptools.finaid.receipt_template_spa` when empty.

    api_key_file: str
        Path to the Google credentials json file.
        If left empty will try to look for its path in the config.py file.

    Raises
    ------
    KeyError
        If no submission row matches `applicant_name`; the original lookup
        error is attached as the cause.
    """
    # Project imports are kept function-local, as elsewhere in this module.
    from eptools.finaid import get_finaid_ws_data, get_applicant, receipt_template_spa, create_receipt

    if not template_file:
        template_file = receipt_template_spa

    if not api_key_file:
        api_key_file = get_api_key_file()

    output_dir = os.path.abspath(output_dir)
    responses = get_finaid_ws_data(api_key_file=api_key_file, doc_key=finaid_submissions_worksheet[0])

    try:
        applicant_data = get_applicant(applicant_name=applicant_name, submissions=responses, col_name="full_name")
    except Exception as exc:
        # Chain the original error so the root cause is not hidden (PEP 3134).
        raise KeyError("Could not find data for applicant {}.".format(applicant_name)) from exc
    else:
        fpath = create_receipt(applicant_data, template_file=template_file, output_dir=output_dir)
        print("Created {}.".format(fpath))
@task
def fetch_ticket_profiles(ctx, out_filepath, conf=conference, status="all", nondups=False, raise_=False, ticket_id=""):
    """ Create a json file with the all the tickets of the conference.

    Parameters
    ----------
    out_filepath: str
        Path of the output json file.
    conf: str
        Conference identifier; defaults to `config.conference`.
    status: str
        Status of the orders related with the tickets.
        Choices: 'all', 'complete', 'incomplete'.
    nondups: bool
        If enabled will remove the tickets with same owner/email.
    raise_: bool
        If enabled will raise any error that it may find.
    ticket_id: str
        Will output the profile of the given ticket only.
    """
    return people.fetch_files(out_filepath, conf=conf, status=status, nondups=nondups, raise_=raise_, ticket_id=ticket_id)
@task
def fetch_talks_json(ctx, out_filepath="", status="proposed", conf=conference, host="europython.io", with_votes=False):
    """ Return the talks in a json format. `status` choices: ['accepted', 'proposed']

    If `out_filepath` is empty the result is returned instead of being
    written; `with_votes` includes vote counts in the output.
    """
    return talks.fetch_talks_json(out_filepath=out_filepath, status=status, conf=conf, host=host, with_votes=with_votes)
| 33.357143 | 122 | 0.692363 | import os
from invoke import task
from eptools import talks, people
from eptools.gspread_utils import get_api_key_file
from eptools.config import (
conference,
sponsors_billing_worksheet,
finaid_submissions_worksheet
)
@task
def sponsor_agreement(ctx, company_name, output_dir, template_file="", api_key_file=""):
from eptools.sponsors import (
get_sponsor,
get_sponsors_ws_data,
create_sponsor_agreement,
contract_template,
company_name_column,
)
if not template_file:
template_file = contract_template
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_sponsors_ws_data(api_key_file=api_key_file, doc_key=sponsors_billing_worksheet[0])
try:
sponsor_data = get_sponsor(sponsor_name=company_name, sponsors=responses, col_name=company_name_column)
except Exception:
raise KeyError("Could not find data for sponsor {}.".format(company_name))
else:
fpath = create_sponsor_agreement(sponsor_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def finaid_receipt(ctx, applicant_name, output_dir, template_file="", api_key_file=""):
from eptools.finaid import get_finaid_ws_data, get_applicant, receipt_template_spa, create_receipt
if not template_file:
template_file = receipt_template_spa
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_finaid_ws_data(api_key_file=api_key_file, doc_key=finaid_submissions_worksheet[0])
try:
applicant_data = get_applicant(applicant_name=applicant_name, submissions=responses, col_name="full_name")
except Exception:
raise KeyError("Could not find data for applicant {}.".format(applicant_name))
else:
fpath = create_receipt(applicant_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def fetch_ticket_profiles(ctx, out_filepath, conf=conference, status="all", nondups=False, raise_=False, ticket_id=""):
return people.fetch_files(out_filepath, conf=conf, status=status, nondups=nondups, raise_=raise_, ticket_id=ticket_id)
@task
def fetch_talks_json(ctx, out_filepath="", status="proposed", conf=conference, host="europython.io", with_votes=False):
return talks.fetch_talks_json(out_filepath=out_filepath, status=status, conf=conf, host=host, with_votes=with_votes)
| true | true |
f7fc7dd9bbeac171283b48484e35cdf8acf5d5ff | 4,555 | py | Python | solo/args/setup.py | wx-b/solo-learn | a516747e40a17050976c90e72b16a63ffcef42d6 | [
"MIT"
] | null | null | null | solo/args/setup.py | wx-b/solo-learn | a516747e40a17050976c90e72b16a63ffcef42d6 | [
"MIT"
] | null | null | null | solo/args/setup.py | wx-b/solo-learn | a516747e40a17050976c90e72b16a63ffcef42d6 | [
"MIT"
] | null | null | null | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import pytorch_lightning as pl
from solo.args.dataset import (
augmentations_args,
custom_dataset_args,
dataset_args,
linear_augmentations_args,
)
from solo.args.utils import additional_setup_linear, additional_setup_pretrain
from solo.methods import METHODS
from solo.utils.checkpointer import Checkpointer
try:
from solo.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
def parse_args_pretrain() -> argparse.Namespace:
"""Parses dataset, augmentation, pytorch lightning, model specific and additional args.
First adds shared args such as dataset, augmentation and pytorch lightning args, then pulls the
model name from the command and proceeds to add model specific args from the desired class. If
wandb is enabled, it adds checkpointer args. Finally, adds additional non-user given parameters.
Returns:
argparse.Namespace: a namespace containing all args needed for pretraining.
"""
parser = argparse.ArgumentParser()
# add shared arguments
dataset_args(parser)
augmentations_args(parser)
custom_dataset_args(parser)
# add pytorch lightning trainer args
parser = pl.Trainer.add_argparse_args(parser)
# add method-specific arguments
parser.add_argument("--method", type=str)
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add model specific args
parser = METHODS[temp_args.method].add_model_specific_args(parser)
# add auto checkpoint/umap args
parser.add_argument("--save_checkpoint", action="store_true")
parser.add_argument("--auto_umap", action="store_true")
temp_args, _ = parser.parse_known_args()
# optionally add checkpointer and AutoUMAP args
if temp_args.save_checkpoint:
parser = Checkpointer.add_checkpointer_args(parser)
if _umap_available and temp_args.auto_umap:
parser = AutoUMAP.add_auto_umap_args(parser)
# parse args
args = parser.parse_args()
# prepare arguments with additional setup
additional_setup_pretrain(args)
return args
def parse_args_linear() -> argparse.Namespace:
"""Parses feature extractor, dataset, pytorch lightning, linear eval specific and additional args.
First adds and arg for the pretrained feature extractor, then adds dataset, pytorch lightning
and linear eval specific args. If wandb is enabled, it adds checkpointer args. Finally, adds
additional non-user given parameters.
Returns:
argparse.Namespace: a namespace containing all args needed for pretraining.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained_feature_extractor", type=str)
# add shared arguments
dataset_args(parser)
linear_augmentations_args(parser)
custom_dataset_args(parser)
# add pytorch lightning trainer args
parser = pl.Trainer.add_argparse_args(parser)
# linear model
parser = METHODS["linear"].add_model_specific_args(parser)
# THIS LINE IS KEY TO PULL WANDB AND SAVE_CHECKPOINT
parser.add_argument("--save_checkpoint", action="store_true")
temp_args, _ = parser.parse_known_args()
# optionally add checkpointer
if temp_args.save_checkpoint:
parser = Checkpointer.add_checkpointer_args(parser)
# parse args
args = parser.parse_args()
additional_setup_linear(args)
return args
| 34.770992 | 102 | 0.753897 |
import argparse
import pytorch_lightning as pl
from solo.args.dataset import (
augmentations_args,
custom_dataset_args,
dataset_args,
linear_augmentations_args,
)
from solo.args.utils import additional_setup_linear, additional_setup_pretrain
from solo.methods import METHODS
from solo.utils.checkpointer import Checkpointer
try:
from solo.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
def parse_args_pretrain() -> argparse.Namespace:
parser = argparse.ArgumentParser()
dataset_args(parser)
augmentations_args(parser)
custom_dataset_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument("--method", type=str)
temp_args, _ = parser.parse_known_args()
parser = METHODS[temp_args.method].add_model_specific_args(parser)
parser.add_argument("--save_checkpoint", action="store_true")
parser.add_argument("--auto_umap", action="store_true")
temp_args, _ = parser.parse_known_args()
if temp_args.save_checkpoint:
parser = Checkpointer.add_checkpointer_args(parser)
if _umap_available and temp_args.auto_umap:
parser = AutoUMAP.add_auto_umap_args(parser)
args = parser.parse_args()
additional_setup_pretrain(args)
return args
def parse_args_linear() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained_feature_extractor", type=str)
dataset_args(parser)
linear_augmentations_args(parser)
custom_dataset_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser = METHODS["linear"].add_model_specific_args(parser)
parser.add_argument("--save_checkpoint", action="store_true")
temp_args, _ = parser.parse_known_args()
if temp_args.save_checkpoint:
parser = Checkpointer.add_checkpointer_args(parser)
args = parser.parse_args()
additional_setup_linear(args)
return args
| true | true |
f7fc7e26a78cf93f54fea2297185f5131d6287f3 | 3,790 | py | Python | drf_registration/utils/email.py | rti/drf-registration | 0d631730e1730a7778398f4c1e811ca0df57e260 | [
"MIT"
] | 1 | 2020-12-07T04:44:51.000Z | 2020-12-07T04:44:51.000Z | drf_registration/utils/email.py | cunguyendev/drf-registration | 2a9e5ffbffa23bdc787c8363bdd0ffd170cf6bb6 | [
"MIT"
] | null | null | null | drf_registration/utils/email.py | cunguyendev/drf-registration | 2a9e5ffbffa23bdc787c8363bdd0ffd170cf6bb6 | [
"MIT"
] | null | null | null | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
from django.urls import reverse
from drf_registration.constants import DEFAULT_EMAIL_BODY
from drf_registration.tokens import activation_token, reset_password_token
from drf_registration.utils.users import (
has_user_activate_token,
has_user_verify_code,
has_user_verified,
generate_uid_and_token,
)
from drf_registration.settings import drfr_settings
def send_verify_email(user, domain=''):
    """
    Send verify email to user's valid email

    Both verification channels may fire: an activation-token link email
    (which needs ``domain`` to build an absolute URL) and a verify-code
    email.

    Args:
        user (object): The user instance
        domain (str): Scheme/host prefix for the activation link; the
            default empty string yields a relative link in the email.
    """
    if has_user_activate_token():
        send_activate_token_email(user, domain)
    if has_user_verify_code():
        send_verify_code_email(user)
def send_activate_token_email(user, domain):
    """Email the account-activation link to ``user.email``.

    Args:
        user (object): The user instance the token is generated for.
        domain (str): Scheme/host prefix used to build the absolute link.
    """
    token_kwargs = generate_uid_and_token(user, activation_token)
    activate_link = '{}{}'.format(domain, reverse('activate', kwargs=token_kwargs))

    # Prefer the project-configured HTML template; fall back to the
    # built-in plain body when no template is set (or it renders empty).
    html_message = None
    if drfr_settings.USER_ACTIVATE_EMAIL_TEMPLATE:
        context = {
            'activate_link': activate_link,
            'domain': domain
        }
        html_message = render_to_string(drfr_settings.USER_ACTIVATE_EMAIL_TEMPLATE, context)
    if not html_message:
        html_message = DEFAULT_EMAIL_BODY['ACTIVATE'].format(activate_link=activate_link)

    send_mail(
        subject=drfr_settings.USER_ACTIVATE_EMAIL_SUBJECT,
        message='',
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[user.email],
        html_message=html_message,
    )
def send_verify_code_email(user):
    """
    Send verify code to email

    Args:
        user (object): The user object
    """
    # NOTE(review): stub -- the body contains only this docstring, so no
    # verify-code email is actually sent yet. send_verify_email() calls
    # this whenever has_user_verify_code() is true; implement or remove.
def send_email_welcome(user):
    """Send the welcome email to ``user`` once the account is verified.

    Does nothing unless the user is verified and
    ``REGISTER_SEND_WELCOME_EMAIL_ENABLED`` is on.
    """
    # Guard clauses replace the original nested conditional.
    if not has_user_verified(user):
        return
    if not drfr_settings.REGISTER_SEND_WELCOME_EMAIL_ENABLED:
        return

    template = drfr_settings.REGISTER_SEND_WELCOME_EMAIL_TEMPLATE
    body = render_to_string(template, {'user': user}) if template else None
    send_mail(
        subject=drfr_settings.REGISTER_SEND_WELCOME_EMAIL_SUBJECT,
        message='',
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[user.email],
        html_message=body or DEFAULT_EMAIL_BODY['WELCOME'],
    )
def send_reset_password_token_email(user, domain):
    """
    Send reset password token to user email

    Args:
        user (object): The user instance
        domain (string): The current domain
    """
    # Build the absolute reset-password confirmation link (the original
    # comment said "activate link", which was wrong).
    reset_password_link = domain + \
        reverse('reset_password_confirm', \
            kwargs=generate_uid_and_token(user, reset_password_token))

    # Built-in plain body, used when no HTML template is configured.
    default_message = \
        DEFAULT_EMAIL_BODY['RESET_PASSWORD'].format(reset_password_link=reset_password_link)

    html_template = drfr_settings.RESET_PASSWORD_EMAIL_TEMPLATE
    html_message = render_to_string(
        html_template, {
            'reset_password_link': reset_password_link,
            'domain': domain
        }
    ) if html_template else None

    send_mail(
        subject=drfr_settings.RESET_PASSWORD_EMAIL_SUBJECT,
        message='',
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[user.email,],
        html_message=html_message or default_message
    )
| 28.074074 | 92 | 0.695515 | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
from django.urls import reverse
from drf_registration.constants import DEFAULT_EMAIL_BODY
from drf_registration.tokens import activation_token, reset_password_token
from drf_registration.utils.users import (
has_user_activate_token,
has_user_verify_code,
has_user_verified,
generate_uid_and_token,
)
from drf_registration.settings import drfr_settings
def send_verify_email(user, domain=''):
if has_user_activate_token():
send_activate_token_email(user, domain)
if has_user_verify_code():
send_verify_code_email(user)
def send_activate_token_email(user, domain):
activate_link = domain + \
reverse('activate', kwargs=generate_uid_and_token(user, activation_token))
default_message = DEFAULT_EMAIL_BODY['ACTIVATE'].format(activate_link=activate_link)
html_template = drfr_settings.USER_ACTIVATE_EMAIL_TEMPLATE
html_message = render_to_string(
html_template, {
'activate_link': activate_link,
'domain': domain
}
) if html_template else None
send_mail(
subject=drfr_settings.USER_ACTIVATE_EMAIL_SUBJECT,
message='',
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user.email,],
html_message=html_message or default_message
)
def send_verify_code_email(user):
def send_email_welcome(user):
if has_user_verified(user) and drfr_settings.REGISTER_SEND_WELCOME_EMAIL_ENABLED:
default_message = DEFAULT_EMAIL_BODY['WELCOME']
html_template = drfr_settings.REGISTER_SEND_WELCOME_EMAIL_TEMPLATE
html_message = render_to_string(
html_template, {'user': user}
) if html_template else None
send_mail(
subject=drfr_settings.REGISTER_SEND_WELCOME_EMAIL_SUBJECT,
message='',
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user.email,],
html_message=html_message or default_message
)
def send_reset_password_token_email(user, domain):
reset_password_link = domain + \
reverse('reset_password_confirm', \
kwargs=generate_uid_and_token(user, reset_password_token))
default_message = \
DEFAULT_EMAIL_BODY['RESET_PASSWORD'].format(reset_password_link=reset_password_link)
html_template = drfr_settings.RESET_PASSWORD_EMAIL_TEMPLATE
html_message = render_to_string(
html_template, {
'reset_password_link': reset_password_link,
'domain': domain
}
) if html_template else None
send_mail(
subject=drfr_settings.RESET_PASSWORD_EMAIL_SUBJECT,
message='',
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user.email,],
html_message=html_message or default_message
)
| true | true |
f7fc7eaf67f555dd6142377fd960383b759ec2fa | 750 | py | Python | python/ql/test/library-tests/PointsTo/new/code/a_simple.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/test/library-tests/PointsTo/new/code/a_simple.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/test/library-tests/PointsTo/new/code/a_simple.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z |
f1 = 1.0  # analyzer fixture (points-to tests): trailing comments only, so line numbers stay stable
dict  # bare reference to a builtin, as an expression statement
tuple  # bare reference to a builtin
i1 = 0  # int literal binding
s = ()  # empty-tuple binding
def func():  # no-op function fixture
    pass
class C(object):  # minimal class fixture
    pass
def vararg_kwarg(*t, **d):  # *args / **kwargs fixture
    t  # t is always a tuple
    d  # d is always a dict
def multi_loop(seq):  # tuple-unpacking loop target fixture
    x = None  # x has two definitions: None here, or the loop variable below
    for x, y in seq:
        x
def with_definition(x):  # `with ... as` binding fixture
    with x as y:  # y is bound to the result of x.__enter__()
        y
def multi_loop_in_try(x):  # loop nested inside try/except
    try: # This causes additional exception edges, such that:
        for p, q in x: # `x` and `p` are not in the same BB.
            p
    except KeyError:
        pass
def f(*args, **kwargs):  # subscript reads on *args / **kwargs
    not args[0]
    not kwargs["x"]
def multi_assign_and_packing(a, b="b", c="c"):  # tuple pack/unpack fixture
    t = 1, 2, 3  # pack literals
    w = a, b, c  # pack names
    p, q, r = t  # unpack a packed local
    x, y, z = w
    p
    q
    r
    x
    y
    z
    g, h, i = a, b, c  # direct multi-target unpack
    g
    h
    i
    l, m = (1,) + (2,)  # unpack the result of tuple concatenation
    l
    m
    s, u = a  # unpack a parameter (assumes len(a) == 2 at call sites)
    s
    u
| 12.5 | 61 | 0.458667 |
f1 = 1.0
dict
tuple
i1 = 0
s = ()
def func():
pass
class C(object):
pass
def vararg_kwarg(*t, **d):
t
d
def multi_loop(seq):
x = None
for x, y in seq:
x
def with_definition(x):
with x as y:
y
def multi_loop_in_try(x):
try:
for p, q in x:
p
except KeyError:
pass
def f(*args, **kwargs):
not args[0]
not kwargs["x"]
def multi_assign_and_packing(a, b="b", c="c"):
t = 1, 2, 3
w = a, b, c
p, q, r = t
x, y, z = w
p
q
r
x
y
z
g, h, i = a, b, c
g
h
i
l, m = (1,) + (2,)
l
m
s, u = a
s
u
| true | true |
f7fc7eb94d597b11c40b9ad45838d791b39708f4 | 66 | py | Python | poster/data/__init__.py | chrisbrake/poster | 3044188fdcb9b8add0f8af4f77a57975c4d0108f | [
"BSD-3-Clause"
] | null | null | null | poster/data/__init__.py | chrisbrake/poster | 3044188fdcb9b8add0f8af4f77a57975c4d0108f | [
"BSD-3-Clause"
] | null | null | null | poster/data/__init__.py | chrisbrake/poster | 3044188fdcb9b8add0f8af4f77a57975c4d0108f | [
"BSD-3-Clause"
] | null | null | null | from .room import Room
from .message import Message
rooms = dict() | 22 | 28 | 0.772727 | from .room import Room
from .message import Message
rooms = dict() | true | true |
f7fc7fce6fef735ed9aa57b9a0b083d38a3e86a3 | 789 | py | Python | les_8/lab_8/01-classes-as-objects.py | Timurdov/Python3.Advanced | a99ae1ab9e0424aeb7f8e93c53d0e08319b426a2 | [
"Apache-2.0"
] | 1 | 2018-09-10T12:04:53.000Z | 2018-09-10T12:04:53.000Z | les_8/lab_8/01-classes-as-objects.py | Timurdov/Python3.Advanced | a99ae1ab9e0424aeb7f8e93c53d0e08319b426a2 | [
"Apache-2.0"
] | null | null | null | les_8/lab_8/01-classes-as-objects.py | Timurdov/Python3.Advanced | a99ae1ab9e0424aeb7f8e93c53d0e08319b426a2 | [
"Apache-2.0"
] | null | null | null | # В Python всё является объектами, в том числе
# ... and classes themselves (continuing: in Python everything is an object)
# Declare an empty class, MyClass
class MyClass:
    pass
# Create an instance of the class
obj = MyClass()
# obj is an instance of MyClass,
# i.e. its type is MyClass
print(type(obj)) # <class '__main__.MyClass'>
# MyClass is a class, but it is also itself
# an object: an instance of the metaclass `type`,
# which is the abstraction of the notion of a data type
print(type(MyClass)) # <class 'type'>
# Consequently, classes can be treated
# as objects -- for example, copied
AnotherClass = MyClass
print(type(AnotherClass))
# As we can see, AnotherClass is now the very same thing as MyClass,
# and obj is also an instance of AnotherClass
print(isinstance(obj, AnotherClass)) # True
class MyClass:
pass
obj = MyClass()
print(type(obj))
print(type(MyClass))
AnotherClass = MyClass
print(type(AnotherClass))
print(isinstance(obj, AnotherClass)) | true | true |
f7fc80a03be4788494b89c551f0987caa7bb87a1 | 1,525 | py | Python | samples/generated_samples/speech_v1p1beta1_generated_adaptation_list_phrase_set_async.py | zmtkr/python-speech | 6000370242a4c548a3306ae274a0302e2bbb2445 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/speech_v1p1beta1_generated_adaptation_list_phrase_set_async.py | zmtkr/python-speech | 6000370242a4c548a3306ae274a0302e2bbb2445 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/speech_v1p1beta1_generated_adaptation_list_phrase_set_async.py | zmtkr/python-speech | 6000370242a4c548a3306ae274a0302e2bbb2445 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListPhraseSet
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-speech
# [START speech_v1p1beta1_generated_Adaptation_ListPhraseSet_async]
from google.cloud import speech_v1p1beta1
async def sample_list_phrase_set():
    """List every phrase set under a parent resource and print each one."""
    # Create a client
    client = speech_v1p1beta1.AdaptationAsyncClient()

    # Initialize request argument(s)
    request = speech_v1p1beta1.ListPhraseSetRequest(
        parent="parent_value",
    )

    # Make the request. The async client's method is a coroutine, so it must
    # be awaited to obtain the AsyncPager that `async for` can iterate;
    # iterating the bare coroutine raises TypeError.
    page_result = await client.list_phrase_set(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END speech_v1p1beta1_generated_Adaptation_ListPhraseSet_async]
| 32.446809 | 85 | 0.761311 |
from google.cloud import speech_v1p1beta1
async def sample_list_phrase_set():
    """List every phrase set under a parent resource and print each one."""
    client = speech_v1p1beta1.AdaptationAsyncClient()
    request = speech_v1p1beta1.ListPhraseSetRequest(
        parent="parent_value",
    )
    # The async client's list method is a coroutine returning an AsyncPager;
    # it must be awaited before `async for`, otherwise TypeError is raised.
    page_result = await client.list_phrase_set(request=request)
    async for response in page_result:
        print(response)
| true | true |
f7fc816dfe173059b38770c65a9ae5130d92a79f | 17,455 | py | Python | sdk/lusid_asyncio/api/relation_definitions_api.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/api/relation_definitions_api.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/api/relation_definitions_api.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid_asyncio.api_client import ApiClient
from lusid_asyncio.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
from lusid_asyncio.models.create_relation_definition_request import CreateRelationDefinitionRequest
from lusid_asyncio.models.lusid_problem_details import LusidProblemDetails
from lusid_asyncio.models.lusid_validation_problem_details import LusidValidationProblemDetails
from lusid_asyncio.models.relation_definition import RelationDefinition
class RelationDefinitionsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Optional keyword arguments accepted by every endpoint wrapper.
    _COMMON_KWARGS = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    )

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        self.api_client = ApiClient() if api_client is None else api_client

    def _merge_kwargs(self, local_var_params, endpoint_params, method_name):
        """Fold ``kwargs`` into *local_var_params*, rejecting unknown keywords.

        :param local_var_params: the caller's ``locals()`` snapshot (mutated in place)
        :param endpoint_params: keyword names specific to the endpoint
        :param method_name: public method name used in the error message
        """
        allowed = list(endpoint_params) + list(self._COMMON_KWARGS)
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in allowed:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

    def create_relation_definition(self, create_relation_definition_request, **kwargs):
        """[EXPERIMENTAL] CreateRelationDefinition: Create a relation definition

        Define a new relation.

        :param create_relation_definition_request: The definition of the new relation. (required)
        :type create_relation_definition_request: CreateRelationDefinitionRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :return: RelationDefinition (or the request thread when *async_req* is set)
        """
        kwargs['_return_http_data_only'] = True
        return self.create_relation_definition_with_http_info(create_relation_definition_request, **kwargs)

    def create_relation_definition_with_http_info(self, create_relation_definition_request, **kwargs):
        """[EXPERIMENTAL] CreateRelationDefinition: Create a relation definition

        Same as :meth:`create_relation_definition` but also returns the HTTP
        status code and response headers.

        :param create_relation_definition_request: The definition of the new relation. (required)
        :type create_relation_definition_request: CreateRelationDefinitionRequest
        :return: (RelationDefinition, status_code, headers)
        """
        local_var_params = locals()
        self._merge_kwargs(local_var_params,
                           ['create_relation_definition_request'],
                           'create_relation_definition')

        # The request body is mandatory when client-side validation is on.
        if self.api_client.client_side_validation and local_var_params.get('create_relation_definition_request') is None:
            raise ApiValueError("Missing the required parameter `create_relation_definition_request` when calling `create_relation_definition`")

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['text/plain', 'application/json', 'text/json']),
            'Accept-Encoding': "gzip, deflate, br",
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']),
            # LUSID SDK identification headers.
            'X-LUSID-SDK-Language': 'Python',
            'X-LUSID-SDK-Version': '0.11.3923',
        }

        return self.api_client.call_api(
            '/api/relationdefinitions', 'POST',
            {},     # path params
            [],     # query params
            header_params,
            body=local_var_params.get('create_relation_definition_request'),
            post_params=[],
            files={},
            response_types_map={
                201: "RelationDefinition",
                400: "LusidValidationProblemDetails",
            },
            auth_settings=['oauth2'],
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={},
            _request_auth=local_var_params.get('_request_auth'))

    def get_relation_definition(self, scope, code, **kwargs):
        """[EXPERIMENTAL] GetRelationDefinition: Get relation definition

        Retrieve the definition of a specified relation.

        :param scope: The scope of the specified relation. (required)
        :type scope: str
        :param code: The code of the specified relation. Together with the domain and scope this uniquely identifies the relation. (required)
        :type code: str
        :param as_at: The asAt datetime at which to retrieve the relation definition. Defaults to the latest version when omitted.
        :type as_at: datetime
        :return: RelationDefinition (or the request thread when *async_req* is set)
        """
        kwargs['_return_http_data_only'] = True
        return self.get_relation_definition_with_http_info(scope, code, **kwargs)

    def get_relation_definition_with_http_info(self, scope, code, **kwargs):
        """[EXPERIMENTAL] GetRelationDefinition: Get relation definition

        Same as :meth:`get_relation_definition` but also returns the HTTP
        status code and response headers.

        :return: (RelationDefinition, status_code, headers)
        """
        local_var_params = locals()
        self._merge_kwargs(local_var_params, ['scope', 'code', 'as_at'],
                           'get_relation_definition')

        if self.api_client.client_side_validation:
            # `scope` and `code` share the same 1-64 character identifier rules.
            for param_name in ('scope', 'code'):
                param_value = local_var_params[param_name]
                if len(param_value) > 64:
                    raise ApiValueError("Invalid value for parameter `%s` when calling `get_relation_definition`, length must be less than or equal to `64`" % param_name)
                if len(param_value) < 1:
                    raise ApiValueError("Invalid value for parameter `%s` when calling `get_relation_definition`, length must be greater than or equal to `1`" % param_name)
                if not re.search(r'^[a-zA-Z0-9\-_]+$', param_value):
                    raise ApiValueError(r"Invalid value for parameter `%s` when calling `get_relation_definition`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`" % param_name)

        query_params = []
        if local_var_params.get('as_at') is not None:
            query_params.append(('asAt', local_var_params['as_at']))

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['text/plain', 'application/json', 'text/json']),
            'Accept-Encoding': "gzip, deflate, br",
            'X-LUSID-SDK-Language': 'Python',
            'X-LUSID-SDK-Version': '0.11.3923',
        }

        return self.api_client.call_api(
            '/api/relationdefinitions/{scope}/{code}', 'GET',
            {'scope': local_var_params['scope'], 'code': local_var_params['code']},
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_types_map={
                200: "RelationDefinition",
                400: "LusidValidationProblemDetails",
            },
            auth_settings=['oauth2'],
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={},
            _request_auth=local_var_params.get('_request_auth'))
| 48.756983 | 174 | 0.626296 |
from __future__ import absolute_import
import re
import six
from lusid_asyncio.api_client import ApiClient
from lusid_asyncio.exceptions import (
ApiTypeError,
ApiValueError
)
from lusid_asyncio.models.create_relation_definition_request import CreateRelationDefinitionRequest
from lusid_asyncio.models.lusid_problem_details import LusidProblemDetails
from lusid_asyncio.models.lusid_validation_problem_details import LusidValidationProblemDetails
from lusid_asyncio.models.relation_definition import RelationDefinition
class RelationDefinitionsApi(object):
    """Auto-generated LUSID client bindings for relation-definition endpoints.

    Generated by OpenAPI Generator (https://openapi-generator.tech);
    do not edit manually.
    """

    # Optional keyword arguments understood by every endpoint method.
    _OPTIONAL_KWARGS = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    )

    def __init__(self, api_client=None):
        # Use a default-configured client when the caller supplies none.
        self.api_client = ApiClient() if api_client is None else api_client

    def _absorb_kwargs(self, params, endpoint_params, method_name):
        """Merge ``params['kwargs']`` into *params*, rejecting unknown names."""
        allowed = list(endpoint_params) + list(self._OPTIONAL_KWARGS)
        for key, val in six.iteritems(params['kwargs']):
            if key not in allowed:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        del params['kwargs']

    def create_relation_definition(self, create_relation_definition_request, **kwargs):
        """Create a relation definition and return the resulting RelationDefinition.

        :param create_relation_definition_request: definition of the new relation (required)
        """
        kwargs['_return_http_data_only'] = True
        return self.create_relation_definition_with_http_info(create_relation_definition_request, **kwargs)

    def create_relation_definition_with_http_info(self, create_relation_definition_request, **kwargs):
        """Create a relation definition; returns (data, status_code, headers)."""
        params = locals()
        self._absorb_kwargs(params,
                            ['create_relation_definition_request'],
                            'create_relation_definition')

        # The request body is mandatory when client-side validation is on.
        if self.api_client.client_side_validation and params.get('create_relation_definition_request') is None:
            raise ApiValueError("Missing the required parameter `create_relation_definition_request` when calling `create_relation_definition`")

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['text/plain', 'application/json', 'text/json']),
            'Accept-Encoding': "gzip, deflate, br",
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']),
            # LUSID SDK identification headers.
            'X-LUSID-SDK-Language': 'Python',
            'X-LUSID-SDK-Version': '0.11.3923',
        }

        return self.api_client.call_api(
            '/api/relationdefinitions', 'POST',
            {},     # path params
            [],     # query params
            header_params,
            body=params.get('create_relation_definition_request'),
            post_params=[],
            files={},
            response_types_map={
                201: "RelationDefinition",
                400: "LusidValidationProblemDetails",
            },
            auth_settings=['oauth2'],
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={},
            _request_auth=params.get('_request_auth'))

    def get_relation_definition(self, scope, code, **kwargs):
        """Retrieve the RelationDefinition identified by *scope*/*code*.

        :param scope: scope of the relation (required, 1-64 chars)
        :param code: code of the relation (required, 1-64 chars)
        :param as_at: optional asAt datetime; defaults to the latest version
        """
        kwargs['_return_http_data_only'] = True
        return self.get_relation_definition_with_http_info(scope, code, **kwargs)

    def get_relation_definition_with_http_info(self, scope, code, **kwargs):
        """Retrieve a relation definition; returns (data, status_code, headers)."""
        params = locals()
        self._absorb_kwargs(params, ['scope', 'code', 'as_at'],
                            'get_relation_definition')

        if self.api_client.client_side_validation:
            # Both identifiers obey the same 1-64 character alphanumeric rules.
            for name in ('scope', 'code'):
                value = params[name]
                if len(value) > 64:
                    raise ApiValueError("Invalid value for parameter `%s` when calling `get_relation_definition`, length must be less than or equal to `64`" % name)
                if len(value) < 1:
                    raise ApiValueError("Invalid value for parameter `%s` when calling `get_relation_definition`, length must be greater than or equal to `1`" % name)
                if not re.search(r'^[a-zA-Z0-9\-_]+$', value):
                    raise ApiValueError(r"Invalid value for parameter `%s` when calling `get_relation_definition`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`" % name)

        query_params = []
        if params.get('as_at') is not None:
            query_params.append(('asAt', params['as_at']))

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['text/plain', 'application/json', 'text/json']),
            'Accept-Encoding': "gzip, deflate, br",
            'X-LUSID-SDK-Language': 'Python',
            'X-LUSID-SDK-Version': '0.11.3923',
        }

        return self.api_client.call_api(
            '/api/relationdefinitions/{scope}/{code}', 'GET',
            {'scope': params['scope'], 'code': params['code']},
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_types_map={
                200: "RelationDefinition",
                400: "LusidValidationProblemDetails",
            },
            auth_settings=['oauth2'],
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={},
            _request_auth=params.get('_request_auth'))
| true | true |
f7fc816fb7ae10599f933fbcd095e85c1e016639 | 2,792 | py | Python | ML/train_keras.py | hoopoe/face_liveness_detection | 834963001ee36b36264624b3c2fbec90f2343ce3 | [
"MIT"
] | 5 | 2018-08-23T15:01:00.000Z | 2019-03-19T09:39:25.000Z | ML/train_keras.py | hoopoe/face_liveness_detection | 834963001ee36b36264624b3c2fbec90f2343ce3 | [
"MIT"
] | null | null | null | ML/train_keras.py | hoopoe/face_liveness_detection | 834963001ee36b36264624b3c2fbec90f2343ce3 | [
"MIT"
] | 2 | 2018-10-20T08:00:02.000Z | 2021-01-30T14:49:48.000Z | from __future__ import print_function
import os
from tqdm import tqdm
import cv2
import numpy as np
import argparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Training hyper-parameters for the liveness CNN.
batch_size = 32
num_classes = 2  # binary: live vs. fraud
epochs = 2
# Module-level accumulators that process() fills with frames and labels.
images = []
labels = []
# Expected frame geometry: X is reshaped to (-1, im_h, im_w, 3) below,
# i.e. 150x150 3-channel images.
im_w = 150
im_h = 150
input_shape = (im_w, im_h, 3)
def process(live_input_dirs, fraud_input_dir):
    """Train a small CNN liveness classifier from optical-flow images.

    :param live_input_dirs: directories of ``<index>.png`` live frames (label 1)
    :param fraud_input_dir: directories of ``<index>.png`` fraud frames (label 0)

    Saves the model architecture to ``model.yaml`` and weights to ``model.h5``.
    """
    # Accumulate locally instead of in the module-level `images`/`labels`
    # lists so repeated calls do not train on stale, duplicated data.
    samples = []
    targets = []

    def _load_dir(images_dir, label):
        # Frames are expected to be named 0.png .. N-1.png (the loop below
        # indexes by position, not by the listed file names).
        files = [os.path.join(images_dir, name) for name in os.listdir(images_dir)
                 if os.path.isfile(os.path.join(images_dir, name))]
        for i in tqdm(range(len(files))):
            filename = os.path.join(images_dir, str(i) + ".png")
            img = cv2.imread(filename, cv2.IMREAD_COLOR)
            if img is None:
                # Missing/corrupt frame: skip it instead of appending None,
                # which would crash np.array(...)/255 later.
                continue
            samples.append(img)
            targets.append(label)

    for images_dir in live_input_dirs:
        _load_dir(images_dir, 1)
    for images_dir in fraud_input_dir:
        _load_dir(images_dir, 0)

    X = np.array(samples, dtype=float)
    y = np.array(targets, dtype=float)
    X /= 255  # scale pixel values into [0, 1]
    y = y.reshape((-1, 1))
    X = X.reshape((-1, im_h, im_w, 3))

    from sklearn.preprocessing import OneHotEncoder
    Oneencoder = OneHotEncoder()
    # `.toarray()`: OneHotEncoder returns a scipy sparse matrix by default,
    # which Keras' fit() cannot consume; densify to a plain one-hot array.
    y = Oneencoder.fit_transform(y).toarray()

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    model.fit(X, y,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_split=0.2)

    # Persist architecture (YAML) and weights (HDF5) separately.
    with open("model.yaml", "w") as yaml_file:
        yaml_file.write(model.to_yaml())
    model.save_weights("model.h5")
    print("Saved model to disk")
# CLI entry point: collect the live/fraud image-folder lists and train.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Process video')
    # Both options accept one or more directory paths.
    parser.add_argument('-l','--live', nargs='+', help='list of live optical flow images folders', required=True)
    parser.add_argument('-f','--fraud', nargs='+', help='list of fraud ioptical flow mages folders', required=True)
    args = parser.parse_args()
process(args.live, args.fraud) | 31.022222 | 129 | 0.687321 | from __future__ import print_function
import os
from tqdm import tqdm
import cv2
import numpy as np
import argparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Training hyper-parameters for the liveness CNN.
batch_size = 32
num_classes = 2  # binary: live vs. fraud
epochs = 2
# Module-level accumulators that process() fills with frames and labels.
images = []
labels = []
# Expected frame geometry: X is reshaped to (-1, im_h, im_w, 3) below,
# i.e. 150x150 3-channel images.
im_w = 150
im_h = 150
input_shape = (im_w, im_h, 3)
def process(live_input_dirs, fraud_input_dir):
    """Train a small CNN liveness classifier from optical-flow images.

    :param live_input_dirs: directories of ``<index>.png`` live frames (label 1)
    :param fraud_input_dir: directories of ``<index>.png`` fraud frames (label 0)

    Saves the model architecture to ``model.yaml`` and weights to ``model.h5``.
    """
    # Accumulate locally instead of in the module-level `images`/`labels`
    # lists so repeated calls do not train on stale, duplicated data.
    samples = []
    targets = []

    def _load_dir(images_dir, label):
        # Frames are expected to be named 0.png .. N-1.png.
        files = [os.path.join(images_dir, name) for name in os.listdir(images_dir)
                 if os.path.isfile(os.path.join(images_dir, name))]
        for i in tqdm(range(len(files))):
            filename = os.path.join(images_dir, str(i) + ".png")
            img = cv2.imread(filename, cv2.IMREAD_COLOR)
            if img is None:
                # Missing/corrupt frame: skip it instead of appending None,
                # which would crash np.array(...)/255 later.
                continue
            samples.append(img)
            targets.append(label)

    for images_dir in live_input_dirs:
        _load_dir(images_dir, 1)
    for images_dir in fraud_input_dir:
        _load_dir(images_dir, 0)

    X = np.array(samples, dtype=float)
    y = np.array(targets, dtype=float)
    X /= 255  # scale pixel values into [0, 1]
    y = y.reshape((-1, 1))
    X = X.reshape((-1, im_h, im_w, 3))

    from sklearn.preprocessing import OneHotEncoder
    Oneencoder = OneHotEncoder()
    # `.toarray()`: OneHotEncoder returns a scipy sparse matrix by default,
    # which Keras' fit() cannot consume; densify to a plain one-hot array.
    y = Oneencoder.fit_transform(y).toarray()

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    model.fit(X, y,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_split=0.2)

    # Persist architecture (YAML) and weights (HDF5) separately.
    with open("model.yaml", "w") as yaml_file:
        yaml_file.write(model.to_yaml())
    model.save_weights("model.h5")
    print("Saved model to disk")
# CLI entry point: collect the live/fraud image-folder lists and train.
# (The dataset-export residue fused onto the final call line was removed;
# it made the statement a SyntaxError.)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Process video')
    # Both options accept one or more directory paths.
    parser.add_argument('-l','--live', nargs='+', help='list of live optical flow images folders', required=True)
    parser.add_argument('-f','--fraud', nargs='+', help='list of fraud ioptical flow mages folders', required=True)
    args = parser.parse_args()
    process(args.live, args.fraud)
f7fc81c2a763630762303d95ef3011d8831f8a7d | 1,535 | py | Python | djangocms_reversion2/migrations/0001_initial.py | PeterW-LWL/djangocms-reversion2 | 06d113f803a10aa915bf9664bf09c7f2f35fa38a | [
"MIT"
] | 11 | 2017-02-01T12:53:23.000Z | 2019-12-28T22:25:23.000Z | djangocms_reversion2/migrations/0001_initial.py | PeterW-LWL/djangocms-reversion2 | 06d113f803a10aa915bf9664bf09c7f2f35fa38a | [
"MIT"
] | 39 | 2017-02-02T08:56:04.000Z | 2020-04-02T11:35:59.000Z | djangocms_reversion2/migrations/0001_initial.py | PeterW-LWL/djangocms-reversion2 | 06d113f803a10aa915bf9664bf09c7f2f35fa38a | [
"MIT"
] | 10 | 2017-02-01T12:54:31.000Z | 2020-05-25T01:00:45.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: introduces the PageVersion model that snapshots CMS pages."""
    dependencies = [
        # The ForeignKey/OneToOneField targets below reference cms.Page, so
        # this migration depends on the cms app's migration state.
        ('cms', '0016_auto_20160608_1535'),
    ]
    operations = [
        migrations.CreateModel(
            name='PageVersion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): path/depth/numchild look like django-treebeard
                # materialised-path bookkeeping fields - confirm against the model.
                ('path', models.CharField(unique=True, max_length=255)),
                ('depth', models.PositiveIntegerField()),
                ('numchild', models.PositiveIntegerField(default=0)),
                ('comment', models.TextField(help_text='Particular information concerning this Version', verbose_name='Comment', blank=True)),
                ('title', models.CharField(max_length=63, verbose_name='Name', blank=True)),
                # Per the help_text: at most one version per tree is active.
                ('active', models.BooleanField(default=False, help_text='This the active version of current draft. There can be only one such version per Page version tree.', verbose_name='Active')),
                ('draft', models.ForeignKey(related_name='page_versions', verbose_name='Draft', to='cms.Page', help_text='Current active draft.')),
                # The versioned content itself lives in a hidden cms.Page.
                ('hidden_page', models.OneToOneField(related_name='page_version', verbose_name='Hidden Page', to='cms.Page', help_text='This Page object holds the versioned contents of this PageVersion.')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 47.96875 | 206 | 0.628013 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: introduces the PageVersion model that snapshots CMS pages."""
    dependencies = [
        # The ForeignKey/OneToOneField targets below reference cms.Page, so
        # this migration depends on the cms app's migration state.
        ('cms', '0016_auto_20160608_1535'),
    ]
    operations = [
        migrations.CreateModel(
            name='PageVersion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): path/depth/numchild look like django-treebeard
                # materialised-path bookkeeping fields - confirm against the model.
                ('path', models.CharField(unique=True, max_length=255)),
                ('depth', models.PositiveIntegerField()),
                ('numchild', models.PositiveIntegerField(default=0)),
                ('comment', models.TextField(help_text='Particular information concerning this Version', verbose_name='Comment', blank=True)),
                ('title', models.CharField(max_length=63, verbose_name='Name', blank=True)),
                # Per the help_text: at most one version per tree is active.
                ('active', models.BooleanField(default=False, help_text='This the active version of current draft. There can be only one such version per Page version tree.', verbose_name='Active')),
                ('draft', models.ForeignKey(related_name='page_versions', verbose_name='Draft', to='cms.Page', help_text='Current active draft.')),
                # The versioned content itself lives in a hidden cms.Page.
                ('hidden_page', models.OneToOneField(related_name='page_version', verbose_name='Hidden Page', to='cms.Page', help_text='This Page object holds the versioned contents of this PageVersion.')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| true | true |
f7fc828efbd7d492fb1b9747607f8c757b582053 | 158,410 | py | Python | yt_dlp/YoutubeDL.py | CHJ85/yt-dlp | 1720fd49aed36efce923a22b7df1074fc11958df | [
"Unlicense"
] | null | null | null | yt_dlp/YoutubeDL.py | CHJ85/yt-dlp | 1720fd49aed36efce923a22b7df1074fc11958df | [
"Unlicense"
] | null | null | null | yt_dlp/YoutubeDL.py | CHJ85/yt-dlp | 1720fd49aed36efce923a22b7df1074fc11958df | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random
from string import ascii_letters
from zipimport import zipimporter
from .compat import (
compat_basestring,
compat_get_terminal_size,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_shlex_quote,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .cookies import load_cookies
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DOT_DESKTOP_LINK_TEMPLATE,
DOT_URL_LINK_TEMPLATE,
DOT_WEBLOC_LINK_TEMPLATE,
DownloadError,
encode_compat_str,
encodeFilename,
EntryNotInPlaylist,
error_to_compat_str,
ExistingVideoReached,
expand_path,
ExtractorError,
float_or_none,
format_bytes,
format_field,
STR_FORMAT_RE_TMPL,
STR_FORMAT_TYPES,
formatSeconds,
GeoRestrictedError,
HEADRequest,
int_or_none,
iri_to_uri,
ISO3166Utils,
LazyList,
locked_file,
make_dir,
make_HTTPS_handler,
MaxDownloadsReached,
network_exceptions,
orderedSet,
OUTTMPL_TYPES,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
process_communicate_or_kill,
register_socks_protocols,
RejectedVideoReached,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
strftime_or_none,
subtitles_filename,
ThrottledDownload,
to_high_limit_path,
traverse_obj,
try_get,
UnavailableVideoError,
url_basename,
variadic,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import (
gen_extractor_classes,
get_info_extractor,
_LAZY_LOADER,
_PLUGIN_CLASSES
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
FFmpegFD,
get_suitable_downloader,
shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
get_postprocessor,
FFmpegFixupDurationPP,
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegFixupTimestampPP,
FFmpegMergerPP,
FFmpegPostProcessor,
MoveFilesAfterDownloadPP,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. As, given a video URL, the downloader doesn't know how to
extract all the needed information, task that InfoExtractors do, it
has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object handles it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL process the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceprint: A list of templates to force print
forceurl: Force printing final URL. (Deprecated)
forcetitle: Force printing title. (Deprecated)
forceid: Force printing ID. (Deprecated)
forcethumbnail: Force printing thumbnail URL. (Deprecated)
forcedescription: Force printing description. (Deprecated)
forcefilename: Force printing final filename. (Deprecated)
forceduration: Force printing duration. (Deprecated)
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
force_write_download_archive: Force writing download archive regardless
of 'skip_download' or 'simulate'.
simulate: Do not download the video files. If unset (or None),
simulate only if listsubtitles, listformats or list_thumbnails is used
format: Video format code. see "FORMAT SELECTION" for more details.
allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
ignore_no_formats_error: Ignore "No video formats" error. Useful for
extracting metadata even if the video is not actually
available for download (experimental)
format_sort: How to sort the video formats. see "Sorting Formats"
for more details.
format_sort_force: Force the given format_sort. see "Sorting Formats"
for more details.
allow_multiple_video_streams: Allow multiple video streams to be merged
into a single file
allow_multiple_audio_streams: Allow multiple audio streams to be merged
into a single file
check_formats Whether to test if the formats are downloadable.
Can be True (check all), False (check none)
or None (check only if requested by extractor)
paths: Dictionary of output paths. The allowed keys are 'home'
'temp' and the keys of OUTTMPL_TYPES (in utils.py)
outtmpl: Dictionary of templates for output names. Allowed keys
are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
For compatibility with youtube-dl, a single string can also be used
outtmpl_na_placeholder: Placeholder for unavailable meta fields.
restrictfilenames: Do not allow "&" and spaces in file names
trim_file_name: Limit length of filename (extension excluded)
windowsfilenames: Force the filenames to be windows compatible
ignoreerrors: Do not stop on download errors
(Default True when running yt-dlp,
but False when directly accessing YoutubeDL class)
skip_playlist_after_errors: Number of allowed failures until the rest of
the playlist is skipped
force_generic_extractor: Force downloader to use the generic extractor
overwrites: Overwrite all video and metadata files if True,
overwrite only non-video files if None
and don't overwrite any file if False
For compatibility with youtube-dl,
"nooverwrites" may also be used instead
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
clean_infojson: Remove private fields from the infojson
getcomments: Extract video comments. This will not be written to disk
unless writeinfojson is also given
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
allow_playlist_files: Whether to write playlists' description, infojson etc
also to disk when using the 'write*' options
write_all_thumbnails: Write all thumbnail formats to files
writelink: Write an internet shortcut file, depending on the
current platform (.url/.webloc/.desktop)
writeurllink: Write a Windows internet shortcut file (.url)
writewebloclink: Write a macOS internet shortcut file (.webloc)
writedesktoplink: Write a Linux internet shortcut file (.desktop)
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Deprecated - Use subtitleslangs = ['all']
Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download (can be regex).
The list may contain "all" to refer to all the available
subtitles. The language can be prefixed with a "-" to
exclude it from the requested languages. Eg: ['all', '-live_chat']
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
break_on_existing: Stop the download process after attempting to download a
file that is in the archive.
break_on_reject: Stop the download process when encountering a video that
has been filtered out.
cookiefile: File name where cookies should be read from and dumped to
cookiesfrombrowser: A tuple containing the name of the browser and the profile
name/path from where cookies are loaded.
Eg: ('chrome', ) or ('vivaldi', 'default')
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
yt_dlp/postprocessor/__init__.py for a list.
* when: When to run the postprocessor. Can be one of
pre_process|before_dl|post_process|after_move.
Assumed to be 'post_process' if not given
post_hooks: A list of functions that get called as the final step
for each video file, after all postprocessors have been
called. The filename will be passed as the only argument.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
* info_dict: The extracted info_dict
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
final_ext: Expected final extension; used to detect when the file was
already downloaded and converted. "merge_output_format" is
replaced by this extension when given
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
yt-dlp servers for debugging. (BROKEN)
sleep_interval_requests: Number of seconds to sleep between requests
during extraction
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
The following options determine which downloader is picked:
external_downloader: A dictionary of protocol keys and the executable of the
external downloader to use for it. The allowed protocols
are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
Set the value to 'native' to use the native downloader
hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
or {'m3u8': 'ffmpeg'} instead.
Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
compat_opts: Compatibility options. See "Differences in default behavior".
The following options do not work when used through the API:
filename, abort-on-error, multistreams, no-live-chat,
no-clean-infojson, no-playlist-metafiles, no-keep-subs.
Refer __init__.py for their implementation
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see yt_dlp/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts, http_chunk_size.
The following options are used by the post processors:
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg. (avconv support is deprecated)
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
to the binary or its containing directory.
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
and a list of additional command-line arguments for the
postprocessor/executable. The dict can also have "PP+EXE" keys
which are used when the given exe is used by the given PP.
Use 'default' as the name for arguments to passed to all PP
For compatibility with youtube-dl, a single list of args
can also be used
The following options are used by the extractors:
extractor_retries: Number of times to retry for known errors
dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
hls_split_discontinuity: Split HLS playlists to different formats at
discontinuities such as ad breaks (default: False)
extractor_args: A dictionary of arguments to be passed to the extractors.
See "EXTRACTOR ARGUMENTS" for details.
Eg: {'youtube': {'skip': ['dash', 'hls']}}
youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH. (only for youtube)
youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
If True (default), HLS manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about HLS. (only for youtube)
"""
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
params = None
_ies = []
_pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
_printed_messages = set()
_first_webpage_request = True
_download_retcode = None
_num_downloads = None
_playlist_level = 0
_playlist_urls = set()
_screen_file = None
    def __init__(self, params=None, auto_init=True):
        """
        Create a YoutubeDL (file downloader) object with the given options.

        params:    option dictionary (see the class docstring for the keys);
                   merged over the built-in defaults.
        auto_init: if True, print the debug header and register the default
                   info extractors immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        # Postprocessors bucketed by the stage ("when") at which they run
        self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # Route normal output to stderr when 'logtostderr' is set
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        if sys.version_info < (3, 6):
            self.report_warning(
                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

        def check_deprecated(param, option, suggestion):
            # Warn about a deprecated option; returns True if it was used
            if self.params.get(param) is not None:
                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('warnings', []):
            self.report_warning(msg)

        if self.params.get('final_ext'):
            # final_ext (from --remux-video/--recode-video) dictates the container
            if self.params.get('merge_output_format'):
                self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
            self.params['merge_output_format'] = self.params['final_ext']

        if self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        elif self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if params.get('bidi_workaround', False):
            # Spawn bidiv (or fribidi as fallback) behind a pty; all output is
            # later filtered through it by _bidi_workaround
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    # bidiv is not available -- try fribidi instead
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            None if self.params.get('format') is None
            else self.build_format_selector(self.params['format']))

        self._setup_opener()

        """Preload the archive, if any is specified"""
        def preload_download_archive(fn):
            # Load the download archive file into self.archive;
            # returns False when no archive was loaded
            if fn is None:
                return False
            self.write_debug('Loading archive file %r\n' % fn)
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                # A missing archive file is fine; anything else is an error
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate and register the configured postprocessors
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            pp_class = get_postprocessor(pp_def.pop('key'))
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['yt-dlp']
+ [a for i, a in enumerate(argv) if i not in idxs]
+ ['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key, it will try to get one from
the _ies list, if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp, when='post_process'):
"""Add a PostProcessor object to the end of the chain."""
self._pps[when].append(pp)
pp.set_downloader(self)
def add_post_hook(self, ph):
"""Add the post hook"""
self._post_hooks.append(ph)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def _write_string(self, message, out=None, only_once=False):
if only_once:
if message in self._printed_messages:
return
self._printed_messages.add(message)
write_string(message, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, quiet=False):
"""Print message to stdout"""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not quiet or self.params.get('verbose'):
self._write_string(
'%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
self._err_file if quiet else self._screen_file)
def to_stderr(self, message, only_once=False):
"""Print message to stderr"""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
    def to_console_title(self, message):
        # Set the console/terminal window title to `message`.
        # No-op unless the 'consoletitle' option is enabled.
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            # Terminal escape sequence that sets the window title
            self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate'):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate'):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        # The active exception carries its own exc_info
                        # (a wrapped cause); include that traceback as well
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    # Not inside an exception handler: show the call stack instead
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            # Prefer the wrapped exception's exc_info when one is attached
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode"""
self.to_stdout(
message, skip_eol, quiet=self.params.get('quiet', False))
def report_warning(self, message, only_once=False):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message, only_once)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def write_debug(self, message, only_once=False):
'''Log debug message or Print message to stderr'''
if not self.params.get('verbose', False):
return
message = '[debug] %s' % message
if self.params.get('logger'):
self.params['logger'].debug(message)
else:
self.to_stderr(message, only_once)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_file_delete(self, file_name):
"""Report that existing file will be deleted."""
try:
self.to_screen('Deleting existing file %s' % file_name)
except UnicodeEncodeError:
self.to_screen('Deleting existing file')
def parse_outtmpl(self):
outtmpl_dict = self.params.get('outtmpl', {})
if not isinstance(outtmpl_dict, dict):
outtmpl_dict = {'default': outtmpl_dict}
outtmpl_dict.update({
k: v for k, v in DEFAULT_OUTTMPL.items()
if not outtmpl_dict.get(k)})
for key, val in outtmpl_dict.items():
if isinstance(val, bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
return outtmpl_dict
def get_output_path(self, dir_type='', filename=None):
paths = self.params.get('paths', {})
assert isinstance(paths, dict)
path = os.path.join(
expand_path(paths.get('home', '').strip()),
expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
filename or '')
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
path = encodeFilename(path, True).decode(preferredencoding())
return sanitize_path(path, force=self.params.get('windowsfilenames'))
@staticmethod
def _outtmpl_expandpath(outtmpl):
# expand_path translates '%%' into '%' and '$$' into '$'
# correspondingly that is not what we want since we need to keep
# '%%' intact for template dict substitution step. Working around
# with boundary-alike separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
return expand_path(outtmpl).replace(sep, '')
@staticmethod
def escape_outtmpl(outtmpl):
''' Escape any remaining strings like %s, %abc% etc. '''
return re.sub(
STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
outtmpl)
@classmethod
def validate_outtmpl(cls, outtmpl):
''' @return None or Exception object '''
outtmpl = re.sub(
STR_FORMAT_RE_TMPL.format('[^)]*', '[ljq]'),
lambda mobj: f'{mobj.group(0)[:-1]}s',
cls._outtmpl_expandpath(outtmpl))
try:
cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
return None
except ValueError as err:
return err
def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
    """Make the template and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % tmpl_dict.

    Returns (rewritten_template, tmpl_dict). Each %(...)X field in *outtmpl*
    is parsed (traversal, negation, maths, strftime, default), evaluated
    against *info_dict*, and stored under a NUL-delimited key in tmpl_dict;
    the template is rewritten to reference those keys.

    sanitize, when given, is a callable (field_name, value) -> value applied
    to string-like conversions.
    """
    info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set
    info_dict = dict(info_dict)  # Do not sanitize so as not to consume LazyList
    # Strip internal bookkeeping keys that must never leak into filenames.
    for key in ('__original_infodict', '__postprocessors'):
        info_dict.pop(key, None)
    info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
        formatSeconds(info_dict['duration'], '-' if sanitize else ':')
        if info_dict.get('duration', None) is not None
        else None)
    info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
    if info_dict.get('resolution') is None:
        info_dict['resolution'] = self.format_resolution(info_dict, default=None)

    # For fields playlist_index and autonumber convert all occurrences
    # of %(field)s to %(field)0Nd for backward compatibility
    field_size_compat_map = {
        'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
        'autonumber': self.params.get('autonumber_size') or 5,
    }

    TMPL_DICT = {}
    # Matches %(...)X fields including the extended l/j/q conversions.
    EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljq]'))
    MATH_FUNCTIONS = {
        '+': float.__add__,
        '-': float.__sub__,
    }
    # Field is of the form key1.key2...
    # where keys (except first) can be string, int or slice
    FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
    MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
    MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
    # Full field grammar: [-]fields[(+|-)term...][>strftime][|default]
    INTERNAL_FORMAT_RE = re.compile(r'''(?x)
        (?P<negate>-)?
        (?P<fields>{field})
        (?P<maths>(?:{math_op}{math_field})*)
        (?:>(?P<strf_format>.+?))?
        (?:\|(?P<default>.*?))?
        $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))

    def _traverse_infodict(k):
        # Dotted-path lookup into info_dict (supports ints and slices).
        k = k.split('.')
        if k[0] == '':
            k.pop(0)
        return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

    def get_value(mdict):
        # Evaluate one parsed field spec (groupdict of INTERNAL_FORMAT_RE).
        # Object traversal
        value = _traverse_infodict(mdict['fields'])
        # Negative
        if mdict['negate']:
            value = float_or_none(value)
            if value is not None:
                value *= -1
        # Do maths
        offset_key = mdict['maths']
        if offset_key:
            value = float_or_none(value)
            operator = None
            # Alternately consume an operator, then an operand (literal number
            # or another info_dict field), left to right.
            while offset_key:
                item = re.match(
                    MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                    offset_key).group(0)
                offset_key = offset_key[len(item):]
                if operator is None:
                    operator = MATH_FUNCTIONS[item]
                    continue
                item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                offset = float_or_none(item)
                if offset is None:
                    offset = float_or_none(_traverse_infodict(item))
                try:
                    value = operator(value, multiplier * offset)
                except (TypeError, ZeroDivisionError):
                    return None
                operator = None
        # Datetime formatting
        if mdict['strf_format']:
            value = strftime_or_none(value, mdict['strf_format'])
        return value

    na = self.params.get('outtmpl_na_placeholder', 'NA')

    def _dumpjson_default(obj):
        # Make sets and LazyLists JSON-serializable for the 'j' conversion.
        if isinstance(obj, (set, LazyList)):
            return list(obj)
        raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')

    def create_key(outer_mobj):
        # re.sub callback: evaluate one template field, stash its value in
        # TMPL_DICT, and rewrite the field to reference the stashed key.
        if not outer_mobj.group('has_key'):
            return f'%{outer_mobj.group(0)}'
        key = outer_mobj.group('key')
        mobj = re.match(INTERNAL_FORMAT_RE, key)
        if mobj is None:
            value, default, mobj = None, na, {'fields': ''}
        else:
            mobj = mobj.groupdict()
            default = mobj['default'] if mobj['default'] is not None else na
            value = get_value(mobj)

        fmt = outer_mobj.group('format')
        if fmt == 's' and value is not None and key in field_size_compat_map.keys():
            # Backward-compat zero-padding for playlist_index/autonumber.
            fmt = '0{:d}d'.format(field_size_compat_map[key])

        value = default if value is None else value

        str_fmt = f'{fmt[:-1]}s'
        if fmt[-1] == 'l':
            value, fmt = ', '.join(variadic(value)), str_fmt
        elif fmt[-1] == 'j':
            value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
        elif fmt[-1] == 'q':
            value, fmt = compat_shlex_quote(str(value)), str_fmt
        elif fmt[-1] == 'c':
            value = str(value)
            # NOTE(review): str(value) can never be None, so this branch looks
            # unreachable (value was already defaulted above) — confirm upstream.
            if value is None:
                value, fmt = default, 's'
            else:
                value = value[0]
        elif fmt[-1] not in 'rs':  # numeric
            value = float_or_none(value)
            if value is None:
                value, fmt = default, 's'
        if sanitize:
            if fmt[-1] == 'r':
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                value, fmt = repr(value), str_fmt
            if fmt[-1] in 'csr':
                value = sanitize(mobj['fields'].split('.')[-1], value)
        # NUL-delimited key avoids collisions with literal '%' in field names.
        key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
        TMPL_DICT[key] = value
        return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

    return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
def _prepare_filename(self, info_dict, tmpl_type='default'):
    """Expand the output template of *tmpl_type* for *info_dict*.

    Returns the expanded filename, or None if the template is invalid
    (the error is reported, not raised).
    """
    try:
        def sanitize(key, value):
            # id-like fields get the stricter "is_id" filename sanitization.
            return sanitize_filename(
                compat_str(value),
                restricted=self.params.get('restrictfilenames'),
                is_id=(key == 'id' or key.endswith('_id')))

        outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
        outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
        filename = self.escape_outtmpl(self._outtmpl_expandpath(outtmpl)) % template_dict

        # Some template types (e.g. infojson, description) force an extension.
        force_ext = OUTTMPL_TYPES.get(tmpl_type)
        if force_ext is not None:
            filename = replace_extension(filename, force_ext, info_dict.get('ext'))

        # https://github.com/blackjack4494/youtube-dlc/issues/85
        trim_file_name = self.params.get('trim_file_name', False)
        if trim_file_name:
            # Truncate the stem but keep the (possibly double) extension.
            pieces = filename.rsplit('.')
            ext = pieces[-1]
            sub_ext = pieces[-2] if len(pieces) > 2 else ''
            filename = '.'.join(filter(None, [pieces[0][:trim_file_name], sub_ext, ext]))
        return filename
    except ValueError as err:
        self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
        return None
def prepare_filename(self, info_dict, dir_type='', warn=False):
    """Generate the output filename."""
    filename = self._prepare_filename(info_dict, dir_type or 'default')
    # Warn (once) when --paths cannot take effect for this filename.
    if warn and self.params.get('paths'):
        if filename == '-':
            self.report_warning('--paths is ignored when an outputting to stdout', only_once=True)
        elif os.path.isabs(filename):
            self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
    if not filename or filename == '-':
        # stdout or template failure: nothing to join with the output dirs.
        return filename
    return self.get_output_path(dir_type, filename)
def _match_entry(self, info_dict, incomplete=False, silent=False):
    """ Returns None if the file should be downloaded.

    Otherwise returns a human-readable skip reason (also printed unless
    *silent*). May raise ExistingVideoReached/RejectedVideoReached when the
    corresponding break_on_* option is set. *incomplete* skips the custom
    match_filter, for partially-extracted (flat playlist) entries.
    """

    # Fallback chain: title -> id -> 'video'; used only for messages.
    video_title = info_dict.get('title', info_dict.get('id', 'video'))

    def check_filter():
        # Returns the first applicable skip reason, or None to allow.
        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if matchtitle:
                if not re.search(matchtitle, title, re.IGNORECASE):
                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if rejecttitle:
                if re.search(rejecttitle, title, re.IGNORECASE):
                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date')
        if date is not None:
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        view_count = info_dict.get('view_count')
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
            return 'Skipping "%s" because it is age restricted' % video_title

        if not incomplete:
            # User-supplied --match-filter callable; any non-None return is
            # treated as the skip reason.
            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                ret = match_filter(info_dict)
                if ret is not None:
                    return ret
        return None

    # Archive hits and filter rejections map to different break options and
    # control-flow exceptions (--break-on-existing vs --break-on-reject).
    if self.in_download_archive(info_dict):
        reason = '%s has already been recorded in the archive' % video_title
        break_opt, break_err = 'break_on_existing', ExistingVideoReached
    else:
        reason = check_filter()
        break_opt, break_err = 'break_on_reject', RejectedVideoReached
    if reason is not None:
        if not silent:
            self.to_screen('[download] ' + reason)
        if self.params.get(break_opt, False):
            raise break_err()
    return reason
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
                 process=True, force_generic_extractor=False):
    """
    Return a list with a dictionary for each video extracted.

    Arguments:
    url -- URL to extract

    Keyword arguments:
    download -- whether to download videos during extraction
    ie_key -- extractor key hint
    extra_info -- dictionary containing the extra values to add to each result
    process -- whether to resolve all unresolved references (URLs, playlist items),
               must be True for download to work.
    force_generic_extractor -- force using the generic extractor

    Note: extra_info uses a mutable default, but it is only read here and
    passed along — NOTE(review): confirm no callee mutates it.
    """

    if not ie_key and force_generic_extractor:
        ie_key = 'Generic'

    if ie_key:
        ies = [self.get_info_extractor(ie_key)]
    else:
        ies = self._ies

    for ie in ies:
        if not ie.suitable(url):
            continue

        ie_key = ie.ie_key()
        ie = self.get_info_extractor(ie_key)
        if not ie.working():
            self.report_warning('The program functionality for this site has been marked as broken, '
                                'and will probably not work.')

        # Cheap pre-check: try to derive the video id from the URL alone so
        # already-archived videos can be skipped without a network round-trip.
        try:
            temp_id = str_or_none(
                ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
                else ie._match_id(url))
        except (AssertionError, IndexError, AttributeError):
            temp_id = None
        if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
            self.to_screen("[%s] %s: has already been recorded in archive" % (
                ie_key, temp_id))
            # break (not return): skips the for-else below and yields None.
            break
        return self.__extract_info(url, ie, download, extra_info, process)
    else:
        # Only reached when no extractor was suitable (loop not broken).
        self.report_error('no suitable InfoExtractor for URL %s' % url)
def __handle_extraction_exceptions(func, handle_all_errors=True):
    # Decorator (defined at class-body level, hence no `self` parameter) that
    # converts extraction failures of the wrapped method into user-facing
    # reports instead of crashes. *handle_all_errors*=False limits handling
    # to the known extraction exceptions only.
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except GeoRestrictedError as e:
            msg = e.msg
            if e.countries:
                msg += '\nThis video is available in %s.' % ', '.join(
                    map(ISO3166Utils.short2full, e.countries))
            msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
            self.report_error(msg)
        except ExtractorError as e:  # An error we somewhat expected
            self.report_error(compat_str(e), e.format_traceback())
        except ThrottledDownload:
            self.to_stderr('\r')
            self.report_warning('The download speed is below throttle limit. Re-extracting data')
            # Retries indefinitely by re-invoking the wrapper itself.
            return wrapper(self, *args, **kwargs)
        except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
            # Control-flow exceptions used to stop processing — always propagate.
            raise
        except Exception as e:
            if handle_all_errors and self.params.get('ignoreerrors', False):
                self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
            else:
                raise
    return wrapper
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
    """Run the extractor on *url* and optionally resolve/download the result."""
    ie_result = ie.extract(url)
    if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
        return
    if isinstance(ie_result, list):
        # Backwards compatibility: old IE result format
        ie_result = {
            '_type': 'compat_list',
            'entries': ie_result,
        }
    original_url = extra_info.get('original_url')
    if original_url:
        ie_result.setdefault('original_url', original_url)
    self.add_default_extra_info(ie_result, ie, url)
    if not process:
        return ie_result
    return self.process_ie_result(ie_result, download, extra_info)
def add_default_extra_info(self, ie_result, ie, url):
    """Fill in standard webpage/extractor metadata on *ie_result* where missing."""
    defaults = {}
    if url is not None:
        defaults.update({
            'webpage_url': url,
            'original_url': url,
            'webpage_url_basename': url_basename(url),
        })
    if ie is not None:
        defaults.update({
            'extractor': ie.IE_NAME,
            'extractor_key': ie.ie_key(),
        })
    if defaults:
        # setdefault semantics: never overwrites values already present.
        self.add_extra_info(ie_result, defaults)
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).

    It will also download the videos if 'download'.
    Returns the resolved ie_result.
    """
    result_type = ie_result.get('_type', 'video')

    if result_type in ('url', 'url_transparent'):
        ie_result['url'] = sanitize_url(ie_result['url'])
        if ie_result.get('original_url'):
            extra_info.setdefault('original_url', ie_result['original_url'])

        extract_flat = self.params.get('extract_flat', False)
        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                or extract_flat is True):
            # Flat extraction: report the entry as-is without resolving it.
            info_copy = ie_result.copy()
            self.add_extra_info(info_copy, extra_info)
            ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
            self.add_default_extra_info(info_copy, ie, ie_result['url'])
            self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
            return ie_result

    if result_type == 'video':
        self.add_extra_info(ie_result, extra_info)
        ie_result = self.process_video_result(ie_result, download=download)
        additional_urls = (ie_result or {}).get('additional_urls')
        if additional_urls:
            # TODO: Improve MetadataParserPP to allow setting a list
            if isinstance(additional_urls, compat_str):
                additional_urls = [additional_urls]
            self.to_screen(
                '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
            self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
            ie_result['additional_entries'] = [
                self.extract_info(
                    url, download, extra_info,
                    force_generic_extractor=self.params.get('force_generic_extractor'))
                for url in additional_urls
            ]
        return ie_result
    elif result_type == 'url':
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(
            ie_result['url'], download,
            ie_key=ie_result.get('ie_key'),
            extra_info=extra_info)
    elif result_type == 'url_transparent':
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result['url'], ie_key=ie_result.get('ie_key'),
            extra_info=extra_info, download=False, process=False)

        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info

        # Non-None outer values override what the inner extraction produced.
        force_properties = dict(
            (k, v) for k, v in ie_result.items() if v is not None)
        for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)

        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
        if new_result.get('_type') == 'url':
            new_result['_type'] = 'url_transparent'

        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info)
    elif result_type in ('playlist', 'multi_video'):
        # Protect from infinite recursion due to recursively nested playlists
        # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
        webpage_url = ie_result['webpage_url']
        if webpage_url in self._playlist_urls:
            # FIX: '%' binds tighter than 'or', so the previous
            # `'...%s' % ie_result.get('title') or ie_result.get('id')` could
            # never fall back to the id; the fallback must be parenthesized.
            self.to_screen(
                '[download] Skipping already downloaded playlist: %s'
                % (ie_result.get('title') or ie_result.get('id')))
            return

        self._playlist_level += 1
        self._playlist_urls.add(webpage_url)
        self._sanitize_thumbnails(ie_result)
        try:
            return self.__process_playlist(ie_result, download)
        finally:
            self._playlist_level -= 1
            if not self._playlist_level:
                self._playlist_urls.clear()
    elif result_type == 'compat_list':
        self.report_warning(
            'Extractor %s returned a compat_list result. '
            'It needs to be updated.' % ie_result.get('extractor'))

        def _fixup(r):
            # Propagate the playlist-level metadata onto each legacy entry.
            self.add_extra_info(r, {
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            })
            return r

        ie_result['entries'] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result['entries']
        ]
        return ie_result
    else:
        raise Exception('Invalid result type: %s' % result_type)
def _ensure_dir_exists(self, path):
    # Create the directory for *path* if needed, reporting failures through
    # self.report_error. Presumably returns truthy on success — callers bail
    # out when the return value is falsy (TODO confirm make_dir's contract).
    return make_dir(path, self.report_error)
def __process_playlist(self, ie_result, download):
    """Resolve and (optionally) download every entry of a playlist result.

    Handles --playlist-items/-start/-end selection, playlist metadata files
    (infojson/description/thumbnail), reverse/random ordering, and per-entry
    failure accounting (--skip-playlist-after-errors). Returns ie_result
    with 'entries' replaced by the processed results.
    """
    # We process each entry in the playlist
    playlist = ie_result.get('title') or ie_result.get('id')
    self.to_screen('[download] Downloading playlist: %s' % playlist)

    if 'entries' not in ie_result:
        raise EntryNotInPlaylist()
    incomplete_entries = bool(ie_result.get('requested_entries'))
    if incomplete_entries:
        # Spread the partially-extracted entries over their 1-based indexes,
        # leaving None holes for the entries that were not requested.
        def fill_missing_entries(entries, indexes):
            # FIX: was `max(*indexes)`, which raises TypeError when exactly
            # one entry was requested (max(5) — int is not iterable).
            # max(indexes) is identical for two or more indexes.
            ret = [None] * max(indexes)
            for i, entry in zip(indexes, entries):
                ret[i - 1] = entry
            return ret
        ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])

    playlist_results = []

    playliststart = self.params.get('playliststart', 1)
    playlistend = self.params.get('playlistend')
    # For backwards compatibility, interpret -1 as whole list
    if playlistend == -1:
        playlistend = None

    playlistitems_str = self.params.get('playlist_items')
    playlistitems = None
    if playlistitems_str is not None:
        # Parse "1,3,5-7" style item specifications.
        def iter_playlistitems(format):
            for string_segment in format.split(','):
                if '-' in string_segment:
                    start, end = string_segment.split('-')
                    for item in range(int(start), int(end) + 1):
                        yield int(item)
                else:
                    yield int(string_segment)
        playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

    ie_entries = ie_result['entries']
    msg = (
        'Downloading %d videos' if not isinstance(ie_entries, list)
        else 'Collected %d videos; downloading %%d of them' % len(ie_entries))

    if not isinstance(ie_entries, (list, PagedList)):
        # Wrap plain generators so entries can be indexed lazily.
        ie_entries = LazyList(ie_entries)

    def get_entry(i):
        # Fetch entry i (1-based) with extraction-error handling but without
        # swallowing unexpected errors (handle_all_errors=False).
        return YoutubeDL.__handle_extraction_exceptions(
            lambda self, i: ie_entries[i - 1],
            False
        )(self, i)

    entries = []
    for i in playlistitems or itertools.count(playliststart):
        if playlistitems is None and playlistend is not None and playlistend < i:
            break
        entry = None
        try:
            entry = get_entry(i)
            if entry is None:
                raise EntryNotInPlaylist()
        except (IndexError, EntryNotInPlaylist):
            if incomplete_entries:
                raise EntryNotInPlaylist()
            elif not playlistitems:
                break
        entries.append(entry)
        try:
            if entry is not None:
                # Evaluate break_on_existing/break_on_reject eagerly (silent).
                self._match_entry(entry, incomplete=True, silent=True)
        except (ExistingVideoReached, RejectedVideoReached):
            break
    ie_result['entries'] = entries

    # Save playlist_index before re-ordering
    entries = [
        ((playlistitems[i - 1] if playlistitems else i), entry)
        for i, entry in enumerate(entries, 1)
        if entry is not None]
    n_entries = len(entries)

    if not playlistitems and (playliststart or playlistend):
        playlistitems = list(range(playliststart, playliststart + n_entries))
    ie_result['requested_entries'] = playlistitems

    if self.params.get('allow_playlist_files', True):
        # 'playlist_index': 0 marks the metadata files as playlist-level.
        ie_copy = {
            'playlist': playlist,
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            'playlist_index': 0,
        }
        ie_copy.update(dict(ie_result))

        if self.params.get('writeinfojson', False):
            infofn = self.prepare_filename(ie_copy, 'pl_infojson')
            if not self._ensure_dir_exists(encodeFilename(infofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Playlist metadata is already present')
            else:
                self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
                try:
                    write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write playlist metadata to JSON file ' + infofn)

        # TODO: This should be passed to ThumbnailsConvertor if necessary
        self._write_thumbnails(ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if self.params.get('writedescription', False):
            descfn = self.prepare_filename(ie_copy, 'pl_description')
            if not self._ensure_dir_exists(encodeFilename(descfn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Playlist description is already present')
            elif ie_result.get('description') is None:
                self.report_warning('There\'s no playlist description to write.')
            else:
                try:
                    self.to_screen('[info] Writing playlist description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(ie_result['description'])
                except (OSError, IOError):
                    self.report_error('Cannot write playlist description file ' + descfn)
                    # NOTE: aborts the whole playlist on description write
                    # failure (pre-existing behavior, kept as-is).
                    return

    if self.params.get('playlistreverse', False):
        entries = entries[::-1]
    if self.params.get('playlistrandom', False):
        random.shuffle(entries)

    x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

    self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
    failures = 0
    max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
    for i, entry_tuple in enumerate(entries, 1):
        playlist_index, entry = entry_tuple
        # NOTE(review): other methods read 'compat_opts'; confirm whether
        # 'compat_options' here is intentional.
        if 'playlist_index' in self.params.get('compat_options', []):
            playlist_index = playlistitems[i - 1] if playlistitems else i
        self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
        # This __x_forwarded_for_ip thing is a bit ugly but requires
        # minimal changes
        if x_forwarded_for:
            entry['__x_forwarded_for_ip'] = x_forwarded_for
        extra = {
            'n_entries': n_entries,
            '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
            'playlist_index': playlist_index,
            'playlist_autonumber': i,
            'playlist': playlist,
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            'extractor': ie_result['extractor'],
            'webpage_url': ie_result['webpage_url'],
            'webpage_url_basename': url_basename(ie_result['webpage_url']),
            'extractor_key': ie_result['extractor_key'],
        }

        if self._match_entry(entry, incomplete=True) is not None:
            continue

        entry_result = self.__process_iterable_entry(entry, download, extra)
        if not entry_result:
            failures += 1
            if failures >= max_failures:
                self.report_error(
                    'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
                break
        # TODO: skip failed (empty) entries?
        playlist_results.append(entry_result)
    ie_result['entries'] = playlist_results
    self.to_screen('[download] Finished downloading playlist: %s' % playlist)
    return ie_result
@__handle_extraction_exceptions
def __process_iterable_entry(self, entry, download, extra_info):
    """Process one playlist entry; extraction errors are reported, not raised."""
    return self.process_ie_result(entry, download=download, extra_info=extra_info)
def _build_format_filter(self, filter_spec):
    """Return a function to filter the formats according to the filter_spec.

    Supports numeric comparisons (e.g. "height<=720", with optional '?'
    meaning "pass when the field is missing") and string comparisons
    (e.g. "ext=mp4", "format_id!^=hls"). Raises SyntaxError on an
    unparseable spec, ValueError on an unparseable numeric value.
    """
    OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    # Numeric filters: known field, comparison op, number with optional
    # SI/binary suffix (e.g. 500K, 1.5GiB).
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
        (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
        ''' % '|'.join(map(re.escape, OPERATORS.keys())))
    m = operator_rex.fullmatch(filter_spec)
    if m:
        try:
            comparison_value = int(m.group('value'))
        except ValueError:
            # Not a plain int: try filesize notation, with and without
            # an implicit trailing 'B'.
            comparison_value = parse_filesize(m.group('value'))
            if comparison_value is None:
                comparison_value = parse_filesize(m.group('value') + 'B')
            if comparison_value is None:
                raise ValueError(
                    'Invalid value %r in format specification %r' % (
                        m.group('value'), filter_spec))
        op = OPERATORS[m.group('op')]

    if not m:
        # Fall back to string filters on arbitrary fields.
        STR_OPERATORS = {
            '=': operator.eq,
            '^=': lambda attr, value: attr.startswith(value),
            '$=': lambda attr, value: attr.endswith(value),
            '*=': lambda attr, value: value in attr,
        }
        str_operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[a-zA-Z0-9._-]+)\s*
            (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[a-zA-Z0-9._-]+)\s*
            ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
        m = str_operator_rex.fullmatch(filter_spec)
        if m:
            comparison_value = m.group('value')
            str_op = STR_OPERATORS[m.group('op')]
            # '!' prefix negates the string operator.
            if m.group('negation'):
                op = lambda attr, value: not str_op(attr, value)
            else:
                op = str_op

    if not m:
        raise SyntaxError('Invalid filter specification %r' % filter_spec)

    def _filter(f):
        # Closure over m/op/comparison_value from whichever branch matched.
        actual_value = f.get(m.group('key'))
        if actual_value is None:
            # Missing field passes only when the spec carried a '?'.
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)
    return _filter
def _default_format_spec(self, info_dict, download=True):
    """Pick the default format selector string for this download."""

    def _merge_possible():
        merger = FFmpegMergerPP(self)
        return merger.available and merger.can_merge()

    # Prefer a single pre-merged format when merging is impossible or
    # pointless: simulation, live streams, or streaming to stdout.
    prefer_best = (
        not self.params.get('simulate')
        and download
        and (
            not _merge_possible()
            or info_dict.get('is_live', False)
            or self.outtmpl_dict['default'] == '-'))
    if prefer_best:
        return 'best/bestvideo+bestaudio'

    compat_mode = (
        self.params.get('allow_multiple_audio_streams', False)
        or 'format-spec' in self.params.get('compat_opts', []))
    if compat_mode:
        return 'bestvideo+bestaudio/best'
    return 'bestvideo*+bestaudio/best'
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
'video': self.params.get('allow_multiple_video_streams', False)}
check_formats = self.params.get('check_formats')
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
if not current_selector:
raise syntax_error('Unexpected "+"', start)
selector_1 = current_selector
selector_2 = _parse_format_selection(tokens, inside_merge=True)
if not selector_2:
raise syntax_error('Expected a selector', start)
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _merge(formats_pair):
format_1, format_2 = formats_pair
formats_info = []
formats_info.extend(format_1.get('requested_formats', (format_1,)))
formats_info.extend(format_2.get('requested_formats', (format_2,)))
if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
get_no_more = {'video': False, 'audio': False}
for (i, fmt_info) in enumerate(formats_info):
if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
formats_info.pop(i)
continue
for aud_vid in ['audio', 'video']:
if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
if get_no_more[aud_vid]:
formats_info.pop(i)
break
get_no_more[aud_vid] = True
if len(formats_info) == 1:
return formats_info[0]
video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
output_ext = self.params.get('merge_output_format')
if not output_ext:
if the_only_video:
output_ext = the_only_video['ext']
elif the_only_audio and not video_fmts:
output_ext = the_only_audio['ext']
else:
output_ext = 'mkv'
new_dict = {
'requested_formats': formats_info,
'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
'ext': output_ext,
}
if the_only_video:
new_dict.update({
'width': the_only_video.get('width'),
'height': the_only_video.get('height'),
'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
'fps': the_only_video.get('fps'),
'vcodec': the_only_video.get('vcodec'),
'vbr': the_only_video.get('vbr'),
'stretched_ratio': the_only_video.get('stretched_ratio'),
})
if the_only_audio:
new_dict.update({
'acodec': the_only_audio.get('acodec'),
'abr': the_only_audio.get('abr'),
})
return new_dict
def _check_formats(formats):
if not check_formats:
yield from formats
return
for f in formats:
self.to_screen('[info] Testing format %s' % f['format_id'])
temp_file = tempfile.NamedTemporaryFile(
suffix='.tmp', delete=False,
dir=self.get_output_path('temp') or None)
temp_file.close()
try:
success, _ = self.dl(temp_file.name, f, test=True)
except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
success = False
finally:
if os.path.exists(temp_file.name):
try:
os.remove(temp_file.name)
except OSError:
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
if success:
yield f
else:
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
def _build_selector_function(selector):
    """Recursively compile a parsed format-selector tree into a callable.

    The returned callable takes a context dict (`{'formats': [...],
    'incomplete_formats': bool}`) and yields/returns the selected format
    dicts. `selector` is either a list (comma-separated alternatives) or a
    parsed node with a `.type` of GROUP/PICKFIRST/MERGE/SINGLE.

    NOTE(review): closure over `_merge`, `_check_formats`, `self` and the
    node-type constants from the enclosing build_format_selector scope.
    """
    if isinstance(selector, list):  # , (all alternatives are yielded)
        fs = [_build_selector_function(s) for s in selector]

        def selector_function(ctx):
            for f in fs:
                yield from f(ctx)
        return selector_function

    elif selector.type == GROUP:  # () (parenthesised sub-expression)
        selector_function = _build_selector_function(selector.selector)

    elif selector.type == PICKFIRST:  # / (first alternative that matches wins)
        fs = [_build_selector_function(s) for s in selector.selector]

        def selector_function(ctx):
            for f in fs:
                picked_formats = list(f(ctx))
                if picked_formats:
                    return picked_formats
            return []

    elif selector.type == MERGE:  # + (cartesian product of both sides, merged)
        selector_1, selector_2 = map(_build_selector_function, selector.selector)

        def selector_function(ctx):
            # deepcopy: each side must see the pristine context, since
            # selectors may mutate the format dicts they receive.
            for pair in itertools.product(
                    selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
                yield _merge(pair)

    elif selector.type == SINGLE:  # atom (a single spec such as 'best', 'mp4', an id...)
        format_spec = selector.selector or 'best'

        # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
        if format_spec == 'all':
            def selector_function(ctx):
                yield from _check_formats(ctx['formats'])
        elif format_spec == 'mergeall':
            def selector_function(ctx):
                formats = list(_check_formats(ctx['formats']))
                if not formats:
                    return
                # Fold all formats (right to left) into one merged format.
                merged_format = formats[-1]
                for f in formats[-2::-1]:
                    merged_format = _merge((merged_format, f))
                yield merged_format

        else:
            format_fallback, format_reverse, format_idx = False, True, 1
            # Matches specs like: best, worst, bv, ba*, bestvideo.2, w, ...
            mobj = re.match(
                r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                format_spec)
            if mobj is not None:
                format_idx = int_or_none(mobj.group('n'), default=1)
                format_reverse = mobj.group('bw')[0] == 'b'
                # First letter only: 'video' -> 'v', 'audio' -> 'a'.
                format_type = (mobj.group('type') or [None])[0]
                not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                format_modified = mobj.group('mod') is not None

                format_fallback = not format_type and not format_modified  # for b, w
                _filter_f = (
                    (lambda f: f.get('%scodec' % format_type) != 'none')
                    if format_type and format_modified  # bv*, ba*, wv*, wa*
                    else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                    if format_type  # bv, ba, wv, wa
                    else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                    if not format_modified  # b, w
                    else lambda f: True)  # b*, w*
                # Regardless of spec, at least one of the codecs must exist.
                filter_f = lambda f: _filter_f(f) and (
                    f.get('vcodec') != 'none' or f.get('acodec') != 'none')
            else:
                # Not a best/worst spec: match by extension or by format_id.
                filter_f = ((lambda f: f.get('ext') == format_spec)
                            if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']  # extension
                            else (lambda f: f.get('format_id') == format_spec))  # id

            def selector_function(ctx):
                formats = list(ctx['formats'])
                matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                if format_fallback and ctx['incomplete_formats'] and not matches:
                    # for extractors with incomplete formats (audio only (soundcloud)
                    # or video only (imgur)) best/worst will fallback to
                    # best/worst {video,audio}-only format
                    matches = formats
                matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                try:
                    yield matches[format_idx - 1]
                except IndexError:
                    return

    # Apply any [filter] clauses attached to this node, on a copy of the
    # context so siblings are unaffected.
    filters = [self._build_format_filter(f) for f in selector.filters]

    def final_selector(ctx):
        ctx_copy = copy.deepcopy(ctx)
        for _filter in filters:
            ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
        return selector_function(ctx_copy)
    return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
    """Compute the HTTP headers to use for this info_dict's URL.

    Starts from the global default headers, layers the extractor-supplied
    `http_headers` on top, then injects the cookiejar's Cookie header and
    (if not already present) the fake X-Forwarded-For IP.
    """
    headers = std_headers.copy()

    extractor_headers = info_dict.get('http_headers')
    if extractor_headers:
        headers.update(extractor_headers)

    cookie_header = self._calc_cookies(info_dict)
    if cookie_header:
        headers['Cookie'] = cookie_header

    if 'X-Forwarded-For' not in headers:
        forwarded_ip = info_dict.get('__x_forwarded_for_ip')
        if forwarded_ip:
            headers['X-Forwarded-For'] = forwarded_ip

    return headers
def _calc_cookies(self, info_dict):
    """Return the Cookie header value the cookiejar would send for this URL
    (or None when no cookies apply)."""
    request = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(request)
    return request.get_header('Cookie')
def _sanitize_thumbnails(self, info_dict):
    """Normalize info_dict['thumbnails'] in place.

    Promotes a lone 'thumbnail' into a one-element thumbnail list, sorts
    thumbnails by preference/size, fills in missing ids/resolutions,
    sanitizes URLs and (unless check_formats is explicitly disabled)
    lazily drops thumbnails whose URL does not answer a HEAD request.
    """
    thumbnails = info_dict.get('thumbnails')
    if thumbnails is None:
        thumbnail = info_dict.get('thumbnail')
        if thumbnail:
            info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
    if thumbnails:
        # Sort ascending so the "best" thumbnail ends up last; missing
        # preference/width/height sort first via the -1 sentinel.
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

        def thumbnail_tester():
            # With check_formats on, probe every thumbnail and report on
            # screen; otherwise only probe those marked '_test_url' and
            # keep the chatter at debug level.
            if self.params.get('check_formats'):
                test_all = True
                to_screen = lambda msg: self.to_screen(f'[info] {msg}')
            else:
                test_all = False
                to_screen = self.write_debug

            def test_thumbnail(t):
                if not test_all and not t.get('_test_url'):
                    return True
                to_screen('Testing thumbnail %s' % t['id'])
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
                        t['id'], t['url'], error_to_compat_str(err)))
                    return False
                return True
            return test_thumbnail

        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is not False:
            # Reversed + LazyList + .reverse(): probes lazily starting from
            # the best (last) thumbnail while preserving ascending order.
            info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
        else:
            info_dict['thumbnails'] = thumbnails
def process_video_result(self, info_dict, download=True):
    """Sanitize a single extractor 'video' result and select its formats.

    Normalizes fields in place (ids, dates, live status, thumbnails,
    subtitles, format metadata), handles the various --list-* options,
    runs the format selector and, when `download` is true, hands each
    selected format to process_info(). Returns the (mutated) info_dict.
    """
    assert info_dict.get('_type', 'video') == 'video'

    if 'id' not in info_dict:
        raise ExtractorError('Missing "id" field in extractor result')
    if 'title' not in info_dict:
        raise ExtractorError('Missing "title" field in extractor result')

    def report_force_conversion(field, field_not, conversion):
        # Extractor bug: field has the wrong type; warn before coercing.
        self.report_warning(
            '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
            % (field, field_not, conversion))

    def sanitize_string_field(info, string_field):
        field = info.get(string_field)
        if field is None or isinstance(field, compat_str):
            return
        report_force_conversion(string_field, 'a string', 'string')
        info[string_field] = compat_str(field)

    def sanitize_numeric_fields(info):
        for numeric_field in self._NUMERIC_FIELDS:
            field = info.get(numeric_field)
            if field is None or isinstance(field, compat_numeric_types):
                continue
            report_force_conversion(numeric_field, 'numeric', 'int')
            info[numeric_field] = int_or_none(field)

    sanitize_string_field(info_dict, 'id')
    sanitize_numeric_fields(info_dict)

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    self._sanitize_thumbnails(info_dict)

    thumbnail = info_dict.get('thumbnail')
    thumbnails = info_dict.get('thumbnails')
    if thumbnail:
        info_dict['thumbnail'] = sanitize_url(thumbnail)
    elif thumbnails:
        # thumbnails are sorted ascending, so the last one is the best
        info_dict['thumbnail'] = thumbnails[-1]['url']

    if info_dict.get('display_id') is None and 'id' in info_dict:
        info_dict['display_id'] = info_dict['id']

    # Derive the YYYYMMDD date fields from the corresponding timestamps.
    for ts_key, date_key in (
            ('timestamp', 'upload_date'),
            ('release_timestamp', 'release_date'),
    ):
        if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
            # see http://bugs.python.org/issue1646728)
            try:
                upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                info_dict[date_key] = upload_date.strftime('%Y%m%d')
            except (ValueError, OverflowError, OSError):
                pass

    # Reconcile live_status with the is_live/was_live booleans: infer the
    # status from the booleans when missing, then backfill the booleans.
    live_keys = ('is_live', 'was_live')
    live_status = info_dict.get('live_status')
    if live_status is None:
        for key in live_keys:
            if info_dict.get(key) is False:
                continue
            if info_dict.get(key):
                live_status = key
                break
        if all(info_dict.get(key) is False for key in live_keys):
            live_status = 'not_live'
    if live_status:
        info_dict['live_status'] = live_status
    for key in live_keys:
        if info_dict.get(key) is None:
            info_dict[key] = (live_status == key)

    # Auto generate title fields corresponding to the *_number fields when missing
    # in order to always have clean titles. This is very common for TV series.
    for field in ('chapter', 'season', 'episode'):
        if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
            info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    # Sanitize subtitle/caption URLs and fill in missing extensions.
    for cc_kind in ('subtitles', 'automatic_captions'):
        cc = info_dict.get(cc_kind)
        if cc:
            for _, subtitle in cc.items():
                for subtitle_format in subtitle:
                    if subtitle_format.get('url'):
                        subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                    if subtitle_format.get('ext') is None:
                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

    automatic_captions = info_dict.get('automatic_captions')
    subtitles = info_dict.get('subtitles')

    info_dict['requested_subtitles'] = self.process_subtitles(
        info_dict['id'], subtitles, automatic_captions)

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    if not formats:
        if not self.params.get('ignore_no_formats_error'):
            raise ExtractorError('No video formats found!')
        else:
            self.report_warning('No video formats found!')

    def is_wellformed(f):
        url = f.get('url')
        if not url:
            self.report_warning(
                '"url" field is missing or empty - skipping format, '
                'there is an error in extractor')
            return False
        if isinstance(url, bytes):
            sanitize_string_field(f, 'url')
        return True

    # Filter out malformed formats for better extraction robustness
    formats = list(filter(is_wellformed, formats))

    formats_dict = {}

    # We check that all the formats have the format and format_id fields
    for i, format in enumerate(formats):
        sanitize_string_field(format, 'format_id')
        sanitize_numeric_fields(format)
        format['url'] = sanitize_url(format['url'])
        if not format.get('format_id'):
            format['format_id'] = compat_str(i)
        else:
            # Sanitize format_id from characters used in format selector expression
            format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
        format_id = format['format_id']
        if format_id not in formats_dict:
            formats_dict[format_id] = []
        formats_dict[format_id].append(format)

    # Make sure all formats have unique format_id
    for format_id, ambiguous_formats in formats_dict.items():
        if len(ambiguous_formats) > 1:
            for i, format in enumerate(ambiguous_formats):
                format['format_id'] = '%s-%d' % (format_id, i)

    for i, format in enumerate(formats):
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=format_field(format, 'format_note', ' (%s)'),
            )
        # Automatically determine file extension if missing
        if format.get('ext') is None:
            format['ext'] = determine_ext(format['url']).lower()
        # Automatically determine protocol if missing (useful for format
        # selection purposes)
        if format.get('protocol') is None:
            format['protocol'] = determine_protocol(format)
        # Add HTTP headers, so that external programs can use them from the
        # json output
        full_format_info = info_dict.copy()
        full_format_info.update(format)
        format['http_headers'] = self._calc_headers(full_format_info)
    # Remove private housekeeping stuff
    if '__x_forwarded_for_ip' in info_dict:
        del info_dict['__x_forwarded_for_ip']

    # TODO Central sorting goes here

    if formats and formats[0] is not info_dict:
        # only set the 'formats' fields if the original info_dict list them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats

    info_dict, _ = self.pre_process(info_dict)

    if self.params.get('list_thumbnails'):
        self.list_thumbnails(info_dict)
    if self.params.get('listformats'):
        if not info_dict.get('formats') and not info_dict.get('url'):
            raise ExtractorError('No video formats found', expected=True)
        self.list_formats(info_dict)
    if self.params.get('listsubtitles'):
        if 'automatic_captions' in info_dict:
            self.list_subtitles(
                info_dict['id'], automatic_captions, 'automatic captions')
        self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
    list_only = self.params.get('simulate') is None and (
        self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
    if list_only:
        # Without this printing, -F --print-json will not work
        self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
        return

    format_selector = self.format_selector
    if format_selector is None:
        req_format = self._default_format_spec(info_dict, download=download)
        self.write_debug('Default format spec: %s' % req_format)
        format_selector = self.build_format_selector(req_format)

    # While in format selection we may need to have an access to the original
    # format set in order to calculate some metrics or do some processing.
    # For now we need to be able to guess whether original formats provided
    # by extractor are incomplete or not (i.e. whether extractor provides only
    # video-only or audio-only formats) for proper formats selection for
    # extractors with such incomplete formats (see
    # https://github.com/ytdl-org/youtube-dl/pull/5556).
    # Since formats may be filtered during format selection and may not match
    # the original formats the results may be incorrect. Thus original formats
    # or pre-calculated metrics should be passed to format selection routines
    # as well.
    # We will pass a context object containing all necessary additional data
    # instead of just formats.
    # This fixes incorrect format selection issue (see
    # https://github.com/ytdl-org/youtube-dl/issues/10083).
    incomplete_formats = (
        # All formats are video-only or
        all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
        # all formats are audio-only
        or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

    ctx = {
        'formats': formats,
        'incomplete_formats': incomplete_formats,
    }

    formats_to_download = list(format_selector(ctx))
    if not formats_to_download:
        if not self.params.get('ignore_no_formats_error'):
            raise ExtractorError('Requested format is not available', expected=True)
        else:
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            self.process_info(dict(info_dict))
    elif download:
        self.to_screen(
            '[info] %s: Downloading %d format(s): %s' % (
                info_dict['id'], len(formats_to_download),
                ", ".join([f['format_id'] for f in formats_to_download])))
        for fmt in formats_to_download:
            new_info = dict(info_dict)
            # Save a reference to the original info_dict so that it can be modified in process_info if needed
            new_info['__original_infodict'] = info_dict
            new_info.update(fmt)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    if formats_to_download:
        info_dict.update(formats_to_download[-1])
    return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
    """Select the requested subtitles and their format.

    Merges normal subtitles and automatic captions (normal ones win on a
    language clash) according to the writesubtitles/writeautomaticsub
    options, resolves the requested language set and, per language, the
    preferred format from 'subtitlesformat'. Returns {lang: format_dict}
    or None when nothing is requested/available.
    """
    available_subs = {}
    if normal_subtitles and self.params.get('writesubtitles'):
        available_subs.update(normal_subtitles)
    if automatic_captions and self.params.get('writeautomaticsub'):
        for lang, cap_info in automatic_captions.items():
            # Normal subtitles take precedence over automatic captions.
            if lang not in available_subs:
                available_subs[lang] = cap_info

    if (not self.params.get('writesubtitles') and not
            self.params.get('writeautomaticsub') or not
            available_subs):
        return None

    all_sub_langs = available_subs.keys()
    if self.params.get('allsubtitles', False):
        requested_langs = all_sub_langs
    elif self.params.get('subtitleslangs', False):
        # Each entry is either 'all', a regex to add, or '-regex' to remove
        # previously added languages; processed in order.
        requested_langs = set()
        for lang in self.params.get('subtitleslangs'):
            if lang == 'all':
                requested_langs.update(all_sub_langs)
                continue
            discard = lang[0] == '-'
            if discard:
                lang = lang[1:]
            # Anchor at the end; match() already anchors at the start.
            current_langs = filter(re.compile(lang + '$').match, all_sub_langs)
            if discard:
                for lang in current_langs:
                    requested_langs.discard(lang)
            else:
                requested_langs.update(current_langs)
    elif 'en' in available_subs:
        requested_langs = ['en']
    else:
        # Fall back to the first available language.
        requested_langs = [list(all_sub_langs)[0]]
    if requested_langs:
        self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

    formats_query = self.params.get('subtitlesformat', 'best')
    formats_preference = formats_query.split('/') if formats_query else []
    subs = {}
    for lang in requested_langs:
        formats = available_subs.get(lang)
        if formats is None:
            self.report_warning('%s subtitles not available for %s' % (lang, video_id))
            continue
        for ext in formats_preference:
            if ext == 'best':
                f = formats[-1]
                break
            matches = list(filter(lambda f: f['ext'] == ext, formats))
            if matches:
                f = matches[-1]
                break
        else:
            # No preferred extension matched: take the last (best) format.
            f = formats[-1]
            self.report_warning(
                'No subtitle format found matching "%s" for language %s, '
                'using %s' % (formats_query, lang, f['ext']))
        subs[lang] = f
    return subs
def __forced_printings(self, info_dict, filename, incomplete):
    """Emit the --force-* / --print / --dump-json outputs to stdout.

    `incomplete` indicates the info_dict may be missing fields (e.g. no
    format selected yet), in which case mandatory fields are only printed
    when actually present. Works on a copy - the caller's dict is not
    mutated.
    """
    def print_mandatory(field, actual_field=None):
        if actual_field is None:
            actual_field = field
        if (self.params.get('force%s' % field, False)
                and (not incomplete or info_dict.get(actual_field) is not None)):
            self.to_stdout(info_dict[actual_field])

    def print_optional(field):
        if (self.params.get('force%s' % field, False)
                and info_dict.get(field) is not None):
            self.to_stdout(info_dict[field])

    info_dict = info_dict.copy()
    if filename is not None:
        info_dict['filename'] = filename
    if info_dict.get('requested_formats') is not None:
        # For RTMP URLs, also include the playpath
        info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
    elif 'url' in info_dict:
        info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

    if self.params.get('forceprint') or self.params.get('forcejson'):
        self.post_extract(info_dict)
    for tmpl in self.params.get('forceprint', []):
        # A bare field name is shorthand for the '%(field)s' template.
        if re.match(r'\w+$', tmpl):
            tmpl = '%({})s'.format(tmpl)
        tmpl, info_copy = self.prepare_outtmpl(tmpl, info_dict)
        self.to_stdout(self.escape_outtmpl(tmpl) % info_copy)

    print_mandatory('title')
    print_mandatory('id')
    print_mandatory('url', 'urls')
    print_optional('thumbnail')
    print_optional('description')
    print_optional('filename')
    if self.params.get('forceduration') and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    print_mandatory('format')

    if self.params.get('forcejson'):
        self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
def dl(self, name, info, subtitle=False, test=False):
    """Download `info` to the file `name` using a suitable downloader.

    With test=True a minimal, quiet parameter set is used (small test
    download, no .part/.ytdl files, always overwrite) - used by format
    checking. Returns whatever FileDownloader.download returns
    (success flag and whether a real download happened).
    """
    if test:
        verbose = self.params.get('verbose')
        params = {
            'test': True,
            'quiet': not verbose,
            'verbose': verbose,
            'noprogress': not verbose,
            'nopart': True,
            'skip_unavailable_fragments': False,
            'keep_fragments': False,
            'overwrites': True,
            '_no_ytdl_file': True,
        }
    else:
        params = self.params
    fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
    if not test:
        # Progress hooks are only attached for real downloads.
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
    urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
    self.write_debug('Invoking downloader on "%s"' % urls)
    # Work on a copy so the caller's info dict is not mutated.
    new_info = dict(info)
    if new_info.get('http_headers') is None:
        new_info['http_headers'] = self._calc_headers(new_info)
    return fd.download(name, new_info, subtitle)
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Applies match filters and download limits, writes the requested side
    files (description, annotations, subtitles, info JSON, thumbnails,
    internet shortcuts), performs the actual media download (including
    multi-format merge handling), queues ffmpeg fixups and runs the
    postprocessors. Mutates `info_dict` in place.
    """
    assert info_dict.get('_type', 'video') == 'video'

    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None:
        if self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()

    # TODO: backward compatibility, to be removed
    info_dict['fulltitle'] = info_dict['title']

    if 'format' not in info_dict and 'ext' in info_dict:
        info_dict['format'] = info_dict['ext']

    if self._match_entry(info_dict) is not None:
        return

    self.post_extract(info_dict)
    self._num_downloads += 1

    # info_dict['_filename'] needs to be set for backward compatibility
    info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
    temp_filename = self.prepare_filename(info_dict, 'temp')
    files_to_move = {}

    # Forced printings
    self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

    if self.params.get('simulate'):
        if self.params.get('force_write_download_archive', False):
            self.record_download_archive(info_dict)

        # Do nothing else if in simulate mode
        return

    if full_filename is None:
        return

    if not self._ensure_dir_exists(encodeFilename(full_filename)):
        return
    if not self._ensure_dir_exists(encodeFilename(temp_filename)):
        return

    if self.params.get('writedescription', False):
        descfn = self.prepare_filename(info_dict, 'description')
        if not self._ensure_dir_exists(encodeFilename(descfn)):
            return
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
            self.to_screen('[info] Video description is already present')
        elif info_dict.get('description') is None:
            self.report_warning('There\'s no description to write.')
        else:
            try:
                self.to_screen('[info] Writing video description to: ' + descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (OSError, IOError):
                self.report_error('Cannot write description file ' + descfn)
                return

    if self.params.get('writeannotations', False):
        annofn = self.prepare_filename(info_dict, 'annotation')
        if not self._ensure_dir_exists(encodeFilename(annofn)):
            return
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
            self.to_screen('[info] Video annotations are already present')
        elif not info_dict.get('annotations'):
            self.report_warning('There are no annotations to write.')
        else:
            try:
                self.to_screen('[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning('There are no annotations to write.')
            except (OSError, IOError):
                self.report_error('Cannot write annotations file: ' + annofn)
                return

    subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                   self.params.get('writeautomaticsub')])

    if subtitles_are_requested and info_dict.get('requested_subtitles'):
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict['requested_subtitles']
        # ie = self.get_info_extractor(info_dict['extractor_key'])
        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(temp_filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(
                self.prepare_filename(info_dict, 'subtitle'), sub_lang, sub_format, info_dict.get('ext'))
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
                self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
                sub_info['filepath'] = sub_filename
                files_to_move[sub_filename] = sub_filename_final
            else:
                self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                if sub_info.get('data') is not None:
                    try:
                        # Use newline='' to prevent conversion of newline characters
                        # See https://github.com/ytdl-org/youtube-dl/issues/10268
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                            subfile.write(sub_info['data'])
                        sub_info['filepath'] = sub_filename
                        files_to_move[sub_filename] = sub_filename_final
                    except (OSError, IOError):
                        self.report_error('Cannot write subtitles file ' + sub_filename)
                        return
                else:
                    try:
                        self.dl(sub_filename, sub_info.copy(), subtitle=True)
                        sub_info['filepath'] = sub_filename
                        files_to_move[sub_filename] = sub_filename_final
                    except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, error_to_compat_str(err)))
                        continue

    if self.params.get('writeinfojson', False):
        infofn = self.prepare_filename(info_dict, 'infojson')
        if not self._ensure_dir_exists(encodeFilename(infofn)):
            return
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
            self.to_screen('[info] Video metadata is already present')
        else:
            self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
            try:
                write_json_file(self.sanitize_info(info_dict, self.params.get('clean_infojson', True)), infofn)
            except (OSError, IOError):
                self.report_error('Cannot write video metadata to JSON file ' + infofn)
                return
        info_dict['__infojson_filename'] = infofn

    for thumb_ext in self._write_thumbnails(info_dict, temp_filename):
        thumb_filename_temp = replace_extension(temp_filename, thumb_ext, info_dict.get('ext'))
        thumb_filename = replace_extension(
            self.prepare_filename(info_dict, 'thumbnail'), thumb_ext, info_dict.get('ext'))
        files_to_move[thumb_filename_temp] = thumb_filename

    # Write internet shortcut files
    url_link = webloc_link = desktop_link = False
    if self.params.get('writelink', False):
        if sys.platform == "darwin":  # macOS.
            webloc_link = True
        elif sys.platform.startswith("linux"):
            desktop_link = True
        else:  # if sys.platform in ['win32', 'cygwin']:
            url_link = True
    if self.params.get('writeurllink', False):
        url_link = True
    if self.params.get('writewebloclink', False):
        webloc_link = True
    if self.params.get('writedesktoplink', False):
        desktop_link = True

    if url_link or webloc_link or desktop_link:
        if 'webpage_url' not in info_dict:
            self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
            return
        ascii_url = iri_to_uri(info_dict['webpage_url'])

    def _write_link_file(extension, template, newline, embed_filename):
        linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
        # BUG FIX: this check was previously `if self.params.get('overwrites', True) and ...`,
        # i.e. inverted - the shortcut was skipped exactly when overwriting was allowed.
        # Now consistent with the description/annotations/subtitles/infojson checks above.
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
            self.to_screen('[info] Internet shortcut is already present')
        else:
            try:
                self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
                with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
                    template_vars = {'url': ascii_url}
                    if embed_filename:
                        template_vars['filename'] = linkfn[:-(len(extension) + 1)]
                    linkfile.write(template % template_vars)
            except (OSError, IOError):
                self.report_error('Cannot write internet shortcut ' + linkfn)
                return False
        return True

    if url_link:
        if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
            return
    if webloc_link:
        if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
            return
    if desktop_link:
        if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
            return

    try:
        info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
    except PostProcessingError as err:
        self.report_error('Preprocessing: %s' % str(err))
        return

    must_record_download_archive = False
    if self.params.get('skip_download', False):
        info_dict['filepath'] = temp_filename
        info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
        info_dict['__files_to_move'] = files_to_move
        info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
    else:
        # Download
        info_dict.setdefault('__postprocessors', [])
        try:

            def existing_file(*filepaths):
                # Return an already-downloaded file to reuse, or None
                # (deleting any stale candidates) when we must download.
                ext = info_dict.get('ext')
                final_ext = self.params.get('final_ext', ext)
                existing_files = []
                for file in orderedSet(filepaths):
                    if final_ext != ext:
                        converted = replace_extension(file, final_ext, ext)
                        if os.path.exists(encodeFilename(converted)):
                            existing_files.append(converted)
                    if os.path.exists(encodeFilename(file)):
                        existing_files.append(file)

                if not existing_files or self.params.get('overwrites', False):
                    for file in orderedSet(existing_files):
                        self.report_file_delete(file)
                        os.remove(encodeFilename(file))
                    return None

                info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
                return existing_files[0]

            success = True
            if info_dict.get('requested_formats') is not None:

                def compatible_formats(formats):
                    # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
                    video_formats = [format for format in formats if format.get('vcodec') != 'none']
                    audio_formats = [format for format in formats if format.get('acodec') != 'none']
                    if len(video_formats) > 2 or len(audio_formats) > 2:
                        return False

                    # Check extension
                    exts = set(format.get('ext') for format in formats)
                    COMPATIBLE_EXTS = (
                        set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
                        set(('webm',)),
                    )
                    for ext_sets in COMPATIBLE_EXTS:
                        if ext_sets.issuperset(exts):
                            return True
                    # TODO: Check acodec/vcodec
                    return False

                requested_formats = info_dict['requested_formats']
                old_ext = info_dict['ext']
                if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                    info_dict['ext'] = 'mkv'
                    self.report_warning(
                        'Requested formats are incompatible for merge and will be merged into mkv.')
                new_ext = info_dict['ext']

                def correct_ext(filename, ext=new_ext):
                    if filename == '-':
                        return filename
                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext in (old_ext, new_ext)
                        else filename)
                    return '%s.%s' % (filename_wo_ext, ext)

                # Ensure filename always has a correct extension for successful merge
                full_filename = correct_ext(full_filename)
                temp_filename = correct_ext(temp_filename)
                dl_filename = existing_file(full_filename, temp_filename)
                info_dict['__real_download'] = False

                _protocols = set(determine_protocol(f) for f in requested_formats)
                if len(_protocols) == 1:  # All requested formats have same protocol
                    info_dict['protocol'] = _protocols.pop()
                directly_mergable = FFmpegFD.can_merge_formats(info_dict)
                if dl_filename is not None:
                    self.report_file_already_downloaded(dl_filename)
                elif (directly_mergable and get_suitable_downloader(
                        info_dict, self.params, to_stdout=(temp_filename == '-')) == FFmpegFD):
                    # ffmpeg can download and mux all formats in one pass.
                    info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                    success, real_download = self.dl(temp_filename, info_dict)
                    info_dict['__real_download'] = real_download
                else:
                    # Download each format separately and merge afterwards.
                    downloaded = []
                    merger = FFmpegMergerPP(self)
                    if self.params.get('allow_unplayable_formats'):
                        self.report_warning(
                            'You have requested merging of multiple formats '
                            'while also allowing unplayable formats to be downloaded. '
                            'The formats won\'t be merged to prevent data corruption.')
                    elif not merger.available:
                        self.report_warning(
                            'You have requested merging of multiple formats but ffmpeg is not installed. '
                            'The formats won\'t be merged.')

                    if temp_filename == '-':
                        reason = ('using a downloader other than ffmpeg' if directly_mergable
                                  else 'but the formats are incompatible for simultaneous download' if merger.available
                                  else 'but ffmpeg is not installed')
                        self.report_warning(
                            f'You have requested downloading multiple formats to stdout {reason}. '
                            'The formats will be streamed one after the other')
                    fname = temp_filename
                    for f in requested_formats:
                        new_info = dict(info_dict)
                        del new_info['requested_formats']
                        new_info.update(f)
                        if temp_filename != '-':
                            fname = prepend_extension(
                                correct_ext(temp_filename, new_info['ext']),
                                'f%s' % f['format_id'], new_info['ext'])
                            if not self._ensure_dir_exists(fname):
                                return
                            downloaded.append(fname)
                        partial_success, real_download = self.dl(fname, new_info)
                        info_dict['__real_download'] = info_dict['__real_download'] or real_download
                        success = success and partial_success
                    if merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
            else:
                # Just a single file
                dl_filename = existing_file(full_filename, temp_filename)
                if dl_filename is None or dl_filename == temp_filename:
                    # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                    # So we should try to resume the download
                    success, real_download = self.dl(temp_filename, info_dict)
                    info_dict['__real_download'] = real_download
                else:
                    self.report_file_already_downloaded(dl_filename)

            dl_filename = dl_filename or temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

        except network_exceptions as err:
            self.report_error('unable to download video data: %s' % error_to_compat_str(err))
            return
        except (OSError, IOError) as err:
            raise UnavailableVideoError(err)
        except (ContentTooShortError, ) as err:
            self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
            return

        if success and full_filename != '-':

            def fixup():
                # Queue ffmpeg-based fixups (or just warn) according to the
                # 'fixup' policy: never/ignore, warn, detect_or_warn, force.
                do_fixup = True
                fixup_policy = self.params.get('fixup')
                vid = info_dict['id']

                if fixup_policy in ('ignore', 'never'):
                    return
                elif fixup_policy == 'warn':
                    do_fixup = False
                elif fixup_policy != 'force':
                    assert fixup_policy in ('detect_or_warn', None)
                    if not info_dict.get('__real_download'):
                        do_fixup = False

                def ffmpeg_fixup(cndn, msg, cls):
                    if not cndn:
                        return
                    if not do_fixup:
                        self.report_warning(f'{vid}: {msg}')
                        return
                    pp = cls(self)
                    if pp.available:
                        info_dict['__postprocessors'].append(pp)
                    else:
                        self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                stretched_ratio = info_dict.get('stretched_ratio')
                ffmpeg_fixup(
                    stretched_ratio not in (1, None),
                    f'Non-uniform pixel ratio {stretched_ratio}',
                    FFmpegFixupStretchedPP)

                ffmpeg_fixup(
                    (info_dict.get('requested_formats') is None
                     and info_dict.get('container') == 'm4a_dash'
                     and info_dict.get('ext') == 'm4a'),
                    'writing DASH m4a. Only some players support this container',
                    FFmpegFixupM4aPP)

                downloader = (get_suitable_downloader(info_dict, self.params).__name__
                              if 'protocol' in info_dict else None)
                ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
                ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
                ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)

            fixup()
            try:
                info_dict = self.post_process(dl_filename, info_dict, files_to_move)
            except PostProcessingError as err:
                self.report_error('Postprocessing: %s' % str(err))
                return
            try:
                for ph in self._post_hooks:
                    ph(info_dict['filepath'])
            except Exception as err:
                self.report_error('post hooks: %s' % str(err))
                return
            must_record_download_archive = True

    if must_record_download_archive or self.params.get('force_write_download_archive', False):
        self.record_download_archive(info_dict)
    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None and self._num_downloads >= int(max_downloads):
        raise MaxDownloadsReached()
def download(self, url_list):
    """Download a given list of URLs."""
    outtmpl = self.outtmpl_dict['default']
    would_collide = (
        len(url_list) > 1
        and outtmpl != '-'
        and '%' not in outtmpl
        and self.params.get('max_downloads') != 1)
    if would_collide:
        # A fixed output template with several URLs would overwrite one file
        raise SameFileError(outtmpl)
    for url in url_list:
        try:
            # extract_info also downloads the videos
            res = self.extract_info(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached')
            raise
        except ExistingVideoReached:
            self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
            raise
        except RejectedVideoReached:
            self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.post_extract(res)
                self.to_stdout(json.dumps(self.sanitize_info(res)))
    return self._download_retcode
def download_with_info_file(self, info_filename):
    """Re-run processing/downloading from a previously dumped info JSON file."""
    with contextlib.closing(fileinput.FileInput(
            [info_filename], mode='r',
            openhook=fileinput.hook_encoded('utf-8'))) as f:
        # FileInput doesn't have a read method, we can't call json.load
        info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
    try:
        self.process_ie_result(info, download=True)
    except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            raise
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])
    return self._download_retcode
@staticmethod
def sanitize_info(info_dict, remove_private_keys=False):
    ''' Sanitize the infodict for converting to json '''
    info_dict.setdefault('epoch', int(time.time()))
    remove_keys = {'__original_infodict'}  # Always remove this since this may contain a copy of the entire dict
    # BUG FIX: this previously read `keep_keys = ['_type'],` — the trailing
    # comma made it a tuple *containing* the list, so '_type' was never found
    # by `k not in keep_keys` and got stripped with the other '_'-prefixed
    # keys, breaking --load-info-json round-trips.
    keep_keys = ['_type']  # Always keep this to facilitate load-info-json
    if remove_private_keys:
        remove_keys |= {
            'requested_formats', 'requested_subtitles', 'requested_entries',
            'filepath', 'entries', 'original_url', 'playlist_autonumber',
        }
        empty_values = (None, {}, [], set(), tuple())
        reject = lambda k, v: k not in keep_keys and (
            k.startswith('_') or k in remove_keys or v in empty_values)
    else:
        reject = lambda k, v: k in remove_keys
    # Recursively filter dicts; lists/tuples/sets (and LazyList) are mapped
    filter_fn = lambda obj: (
        list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
        else obj if not isinstance(obj, dict)
        else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
    return filter_fn(info_dict)
@staticmethod
def filter_requested_info(info_dict, actually_filter=True):
    ''' Alias of sanitize_info for backward compatibility '''
    return YoutubeDL.sanitize_info(info_dict, remove_private_keys=actually_filter)
def run_pp(self, pp, infodict):
    """Run a single postprocessor and honour the files it asks to delete."""
    infodict.setdefault('__files_to_move', {})
    files_to_delete, infodict = pp.run(infodict)
    if not files_to_delete:
        return infodict
    if self.params.get('keepvideo', False):
        # -k given: keep originals, but make sure they are still moved later
        for path in files_to_delete:
            infodict['__files_to_move'].setdefault(path, '')
    else:
        for old_filename in set(files_to_delete):
            self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
            try:
                os.remove(encodeFilename(old_filename))
            except (IOError, OSError):
                self.report_warning('Unable to remove downloaded original file')
            if old_filename in infodict['__files_to_move']:
                del infodict['__files_to_move'][old_filename]
    return infodict
@staticmethod
def post_extract(info_dict):
    """Merge deferred '__post_extractor' data into the infodict (recursing into playlists)."""
    def _apply(d):
        if d.get('_type') in ('playlist', 'multi_video'):
            for entry in d.get('entries', {}):
                _apply(entry or {})
            return
        post_extractor = d.get('__post_extractor') or (lambda: {})
        extra = post_extractor().items()
        d.update(extra)
        d.pop('__post_extractor', None)
        # Keep the stashed original infodict in sync as well
        original = d.get('__original_infodict') or {}
        original.update(extra)
        original.pop('__post_extractor', None)
    _apply(info_dict or {})
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
    """Run the postprocessors registered for *key* before download starts."""
    info = dict(ie_info)
    info['__files_to_move'] = files_to_move or {}
    for processor in self._pps[key]:
        info = self.run_pp(processor, info)
    # Hand the (possibly updated) move-map back to the caller
    return info, info.pop('__files_to_move', None)
def post_process(self, filename, ie_info, files_to_move=None):
    """Run all the postprocessors on the given file."""
    info = dict(ie_info)
    info['filepath'] = filename
    info['__files_to_move'] = files_to_move or {}
    # Per-video postprocessors first, then the globally registered ones
    for processor in ie_info.get('__postprocessors', []) + self._pps['post_process']:
        info = self.run_pp(processor, info)
    info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
    del info['__files_to_move']
    for processor in self._pps['after_move']:
        info = self.run_pp(processor, info)
    return info
def _make_archive_id(self, info_dict):
    """Build the '<extractor> <id>' key used in the download archive, or None."""
    video_id = info_dict.get('id')
    if not video_id:
        return
    # Future-proof against any change in case
    # and backwards compatibility with prior versions
    extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
    if extractor is None:
        url = str_or_none(info_dict.get('url'))
        if not url:
            return
        # Try to find matching extractor for the URL and take its ie_key
        for candidate in self._ies:
            if candidate.suitable(url):
                extractor = candidate.ie_key()
                break
        else:
            return
    return '%s %s' % (extractor.lower(), video_id)
def in_download_archive(self, info_dict):
    """Return True when the video is already recorded in --download-archive."""
    if self.params.get('download_archive') is None:
        return False
    vid_id = self._make_archive_id(info_dict)
    if not vid_id:
        return False  # Incomplete video information
    return vid_id in self.archive
def record_download_archive(self, info_dict):
    """Append the video's archive id to --download-archive (file and memory)."""
    fn = self.params.get('download_archive')
    if fn is None:
        return
    vid_id = self._make_archive_id(info_dict)
    assert vid_id
    # File first, then the in-memory set, so a write failure keeps them in sync
    with locked_file(fn, 'a', encoding='utf-8') as fh:
        fh.write(vid_id + '\n')
    self.archive.add(vid_id)
@staticmethod
def format_resolution(format, default='unknown'):
    """Return a human-readable resolution string for a format dict."""
    if format.get('vcodec') == 'none':
        # No video track: either a pure image set or audio-only
        return 'images' if format.get('acodec') == 'none' else 'audio only'
    if format.get('resolution') is not None:
        return format['resolution']
    width, height = format.get('width'), format.get('height')
    if width and height:
        return '%dx%d' % (width, height)
    if height:
        return '%sp' % height
    if width:
        return '%dx?' % width
    return default
def _format_note(self, fdict):
    """Build the free-form 'note' column for one format dict.

    Pieces are appended in a fixed order (container, video codec/bitrate,
    fps, audio codec/bitrate, sample rate, filesize); ', ' separators are
    only inserted once something has been emitted, so the exact spacing is
    order-dependent — do not reorder these branches.
    """
    res = ''
    if fdict.get('ext') in ['f4f', 'f4m']:
        res += '(unsupported) '
    if fdict.get('language'):
        if res:
            res += ' '
        res += '[%s] ' % fdict['language']
    if fdict.get('format_note') is not None:
        res += fdict['format_note'] + ' '
    if fdict.get('tbr') is not None:
        res += '%4dk ' % fdict['tbr']
    if fdict.get('container') is not None:
        if res:
            res += ', '
        res += '%s container' % fdict['container']
    if (fdict.get('vcodec') is not None
            and fdict.get('vcodec') != 'none'):
        if res:
            res += ', '
        res += fdict['vcodec']
        # '@' joins codec and bitrate below, e.g. 'avc1@1000k'
        if fdict.get('vbr') is not None:
            res += '@'
    elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
        # Bitrates known but codec unknown
        res += 'video@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    if fdict.get('fps') is not None:
        if res:
            res += ', '
        res += '%sfps' % fdict['fps']
    if fdict.get('acodec') is not None:
        if res:
            res += ', '
        if fdict['acodec'] == 'none':
            res += 'video only'
        else:
            res += '%-5s' % fdict['acodec']
    elif fdict.get('abr') is not None:
        if res:
            res += ', '
        res += 'audio'
    if fdict.get('abr') is not None:
        res += '@%3dk' % fdict['abr']
    if fdict.get('asr') is not None:
        res += ' (%5dHz)' % fdict['asr']
    if fdict.get('filesize') is not None:
        if res:
            res += ', '
        res += format_bytes(fdict['filesize'])
    elif fdict.get('filesize_approx') is not None:
        if res:
            res += ', '
        res += '~' + format_bytes(fdict['filesize_approx'])
    return res
def list_formats(self, info_dict):
    """Print the table shown by -F / --list-formats.

    Two layouts exist: the new multi-column table (default) and the old
    4-column youtube-dl layout, selected when the 'list-formats' compat
    option is set or 'listformats_table' is explicitly False.
    """
    formats = info_dict.get('formats', [info_dict])
    new_format = (
        'list-formats' not in self.params.get('compat_opts', [])
        and self.params.get('listformats_table', True) is not False)
    if new_format:
        table = [
            [
                format_field(f, 'format_id'),
                format_field(f, 'ext'),
                self.format_resolution(f),
                format_field(f, 'fps', '%d'),
                '|',
                format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
                format_field(f, 'tbr', '%4dk'),
                shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
                '|',
                format_field(f, 'vcodec', default='unknown').replace('none', ''),
                format_field(f, 'vbr', '%4dk'),
                format_field(f, 'acodec', default='unknown').replace('none', ''),
                format_field(f, 'abr', '%3dk'),
                format_field(f, 'asr', '%5dHz'),
                ', '.join(filter(None, (
                    'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
                    format_field(f, 'language', '[%s]'),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                ))),
            # Formats with very low preference are hidden from the listing
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
                       '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
    else:
        # Legacy youtube-dl style table
        table = [
            [
                format_field(f, 'format_id'),
                format_field(f, 'ext'),
                self.format_resolution(f),
                self._format_note(f)]
            for f in formats
            if f.get('preference') is None or f['preference'] >= -1000]
        header_line = ['format code', 'extension', 'resolution', 'note']
    self.to_screen(
        '[info] Available formats for %s:' % info_dict['id'])
    self.to_stdout(render_table(
        header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
def list_thumbnails(self, info_dict):
    """Print a table of the available thumbnails (--list-thumbnails)."""
    thumbnails = list(info_dict.get('thumbnails'))
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return
    self.to_screen(
        '[info] Thumbnails for %s:' % info_dict['id'])
    rows = [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']]
            for t in thumbnails]
    self.to_stdout(render_table(['ID', 'width', 'height', 'URL'], rows))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print a table of available subtitles (or automatic captions)."""
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen(
        'Available %s for %s:' % (name, video_id))

    def _make_row(lang, formats):
        exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
        if len(set(names)) == 1:
            # All variants share one name: show it once, or drop if 'unknown'
            names = [] if names[0] == 'unknown' else names[:1]
        return [lang, ', '.join(names), ', '.join(exts)]

    self.to_stdout(render_table(
        ['Language', 'Name', 'Formats'],
        [_make_row(lang, formats) for lang, formats in subtitles.items()],
        hideEmpty=True))
def urlopen(self, req):
    """ Start an HTTP download """
    # Accept both a bare URL string and a prepared Request object
    request = sanitized_Request(req) if isinstance(req, compat_basestring) else req
    return self._opener.open(request, timeout=self._socket_timeout)
def print_debug_header(self):
    """Write the '[debug] ...' banner (versions, encodings, libraries, proxies)
    to the screen when --verbose is given; no-op otherwise."""
    if not self.params.get('verbose'):
        return

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    # How this copy is being run: frozen exe, zip bundle, or plain source
    source = (
        '(exe)' if hasattr(sys, 'frozen')
        else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
        else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
        else '')
    self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled\n')
    if _PLUGIN_CLASSES:
        self._write_string(
            '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
    if self.params.get('compat_opts'):
        self._write_string(
            '[debug] Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
    # Best effort: report the git commit when running from a checkout
    try:
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = process_communicate_or_kill(sp)
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: %s\n' % out)
    except Exception:
        try:
            # Python 2 only; harmless AttributeError on Python 3
            sys.exc_clear()
        except Exception:
            pass

    def python_implementation():
        impl_name = platform.python_implementation()
        if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
            return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
        return impl_name

    self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
        platform.python_version(),
        python_implementation(),
        platform.architecture()[0],
        platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
    ) or 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    # Imported here (not at module top) so missing optional deps don't break startup
    from .downloader.fragment import can_decrypt_frag
    from .downloader.websocket import has_websockets
    from .postprocessor.embedthumbnail import has_mutagen
    from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE

    lib_str = ', '.join(sorted(filter(None, (
        can_decrypt_frag and 'pycryptodome',
        has_websockets and 'websockets',
        has_mutagen and 'mutagen',
        SQLITE_AVAILABLE and 'sqlite',
        KEYRING_AVAILABLE and 'keyring',
    )))) or 'none'
    self._write_string('[debug] Optional libraries: %s\n' % lib_str)

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        return
        # NOTE(review): everything below the `return` above is unreachable —
        # presumably the youtube-dl update check was deliberately disabled.
        # Confirm before removing.
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
def _setup_opener(self):
    """Build the urllib opener used by self.urlopen: cookies, proxies,
    custom HTTPS/redirect/data handlers, and a disabled file:// handler.
    Handler order matters to urllib's dispatch — do not reorder casually."""
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
    if opts_proxy is not None:
        # --proxy '' means "use no proxy at all"
        if opts_proxy == '':
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    redirect_handler = YoutubeDLRedirectHandler()
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler and allows us to disable the file protocol, which
    # can be used for malicious purposes (see
    # https://github.com/ytdl-org/youtube-dl/issues/8227)
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
def encode(self, s):
    """Encode *s* using the configured encoding; bytes pass through untouched."""
    if isinstance(s, bytes):
        return s  # Already encoded
    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        # Enrich the error with a hint before re-raising
        err.reason += '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the --encoding option, falling back to the locale's preference."""
    encoding = self.params.get('encoding')
    return preferredencoding() if encoding is None else encoding
def _write_thumbnails(self, info_dict, filename):  # return the extensions
    """Download the video thumbnail(s) next to *filename*.

    With --write-all-thumbnails every entry in info_dict['thumbnails'] is
    fetched (filenames get a '<id>.' suffix when there are several);
    otherwise only the last/best one.  Returns the list of written
    extension suffixes and records each path in t['filepath'].
    """
    write_all = self.params.get('write_all_thumbnails', False)
    thumbnails = []
    if write_all or self.params.get('writethumbnail', False):
        thumbnails = info_dict.get('thumbnails') or []
    multiple = write_all and len(thumbnails) > 1

    ret = []
    # Reversed so the preferred thumbnail is attempted first
    for t in thumbnails[::-1]:
        thumb_ext = determine_ext(t['url'], 'jpg')
        suffix = '%s.' % t['id'] if multiple else ''
        thumb_display_id = '%s ' % t['id'] if multiple else ''
        thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))

        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
            ret.append(suffix + thumb_ext)
            t['filepath'] = thumb_filename
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
        else:
            self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            try:
                uf = self.urlopen(t['url'])
                with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                ret.append(suffix + thumb_ext)
                self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                t['filepath'] = thumb_filename
            except network_exceptions as err:
                # Best effort: a failed thumbnail must not abort the download
                self.report_warning('Unable to download thumbnail "%s": %s' %
                                    (t['url'], error_to_compat_str(err)))
        if ret and not write_all:
            break
    return ret
| 46.728614 | 194 | 0.564238 |
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random
from string import ascii_letters
from zipimport import zipimporter
from .compat import (
compat_basestring,
compat_get_terminal_size,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_shlex_quote,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .cookies import load_cookies
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DOT_DESKTOP_LINK_TEMPLATE,
DOT_URL_LINK_TEMPLATE,
DOT_WEBLOC_LINK_TEMPLATE,
DownloadError,
encode_compat_str,
encodeFilename,
EntryNotInPlaylist,
error_to_compat_str,
ExistingVideoReached,
expand_path,
ExtractorError,
float_or_none,
format_bytes,
format_field,
STR_FORMAT_RE_TMPL,
STR_FORMAT_TYPES,
formatSeconds,
GeoRestrictedError,
HEADRequest,
int_or_none,
iri_to_uri,
ISO3166Utils,
LazyList,
locked_file,
make_dir,
make_HTTPS_handler,
MaxDownloadsReached,
network_exceptions,
orderedSet,
OUTTMPL_TYPES,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
process_communicate_or_kill,
register_socks_protocols,
RejectedVideoReached,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
strftime_or_none,
subtitles_filename,
ThrottledDownload,
to_high_limit_path,
traverse_obj,
try_get,
UnavailableVideoError,
url_basename,
variadic,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import (
gen_extractor_classes,
get_info_extractor,
_LAZY_LOADER,
_PLUGIN_CLASSES
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
FFmpegFD,
get_suitable_downloader,
shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
get_postprocessor,
FFmpegFixupDurationPP,
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegFixupTimestampPP,
FFmpegMergerPP,
FFmpegPostProcessor,
MoveFilesAfterDownloadPP,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
# NOTE(review): presumably the metadata fields treated as numbers by the
# output-template machinery — confirm against the formatter before extending.
_NUMERIC_FIELDS = set((
    'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
    'timestamp', 'upload_year', 'upload_month', 'upload_day',
    'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
    'average_rating', 'comment_count', 'age_limit',
    'start_time', 'end_time',
    'chapter_number', 'season_number', 'episode_number',
    'track_number', 'disc_number', 'release_year',
    'playlist_index',
))

# Class-level placeholders; real values are assigned per instance in __init__
params = None
_ies = []
_pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
_printed_messages = set()
_first_webpage_request = True
_download_retcode = None
_num_downloads = None
_playlist_level = 0
_playlist_urls = set()
_screen_file = None
def __init__(self, params=None, auto_init=True):
    """Create a YoutubeDL object with the given options.

    params -- option dictionary, merged over the defaults
    auto_init -- print the debug header and register the default
                 extractors immediately
    """
    if params is None:
        params = {}
    self._ies = []
    self._ies_instances = {}
    self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
    self._printed_messages = set()
    self._first_webpage_request = True
    self._post_hooks = []
    self._progress_hooks = []
    self._download_retcode = 0
    self._num_downloads = 0
    # Screen output goes to stderr when --logtostderr is set
    self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
    self._err_file = sys.stderr
    self.params = {
        # Default parameters
        'nocheckcertificate': False,
    }
    self.params.update(params)
    self.cache = Cache(self)

    if sys.version_info < (3, 6):
        self.report_warning(
            'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

    def check_deprecated(param, option, suggestion):
        # Warn about a deprecated option; returns whether it was set
        if self.params.get(param) is not None:
            self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
            return True
        return False

    if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
        if self.params.get('geo_verification_proxy') is None:
            self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

    check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
    check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
    check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

    for msg in self.params.get('warnings', []):
        self.report_warning(msg)

    if self.params.get('final_ext'):
        if self.params.get('merge_output_format'):
            self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
        self.params['merge_output_format'] = self.params['final_ext']

    # Keep 'overwrites'/'nooverwrites' mirrored for backward compatibility
    if self.params.get('overwrites') is None:
        self.params.pop('overwrites', None)
    elif self.params.get('nooverwrites') is not None:
        self.params['overwrites'] = not self.params['nooverwrites']
    else:
        self.params['nooverwrites'] = not self.params['overwrites']

    if params.get('bidi_workaround', False):
        try:
            import pty
            master, slave = pty.openpty()
            width = compat_get_terminal_size().columns
            if width is None:
                width_args = []
            else:
                width_args = ['-w', str(width)]
            sp_kwargs = dict(
                stdin=subprocess.PIPE,
                stdout=slave,
                stderr=self._err_file)
            try:
                self._output_process = subprocess.Popen(
                    ['bidiv'] + width_args, **sp_kwargs
                )
            except OSError:
                # Fall back from bidiv to fribidi
                self._output_process = subprocess.Popen(
                    ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
            self._output_channel = os.fdopen(master, 'rb')
        except OSError as ose:
            if ose.errno == errno.ENOENT:
                self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
            else:
                raise

    if (sys.platform != 'win32'
            and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
            and not params.get('restrictfilenames', False)):
        # BUG FIX: this call was corrupted to a bare `ort_warning(`
        # (truncated `self.report_warning(`), which raised NameError here.
        self.report_warning(
            'Assuming --restrict-filenames since file system encoding '
            'cannot encode all characters. '
            'Set the LC_ALL environment variable to fix this.')
        self.params['restrictfilenames'] = True

    self.outtmpl_dict = self.parse_outtmpl()

    self.format_selector = (
        None if self.params.get('format') is None
        else self.build_format_selector(self.params['format']))

    self._setup_opener()

    def preload_download_archive(fn):
        # Load the --download-archive file into self.archive, if any
        if fn is None:
            return False
        self.write_debug('Loading archive file %r\n' % fn)
        try:
            with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                for line in archive_file:
                    self.archive.add(line.strip())
        except IOError as ioe:
            if ioe.errno != errno.ENOENT:
                raise
            return False
        return True

    self.archive = set()
    preload_download_archive(self.params.get('download_archive'))

    if auto_init:
        self.print_debug_header()
        self.add_default_info_extractors()

    for pp_def_raw in self.params.get('postprocessors', []):
        pp_def = dict(pp_def_raw)
        when = pp_def.pop('when', 'post_process')
        pp_class = get_postprocessor(pp_def.pop('key'))
        pp = pp_class(self, **compat_kwargs(pp_def))
        self.add_post_processor(pp, when=when)

    for ph in self.params.get('post_hooks', []):
        self.add_post_hook(ph)

    for ph in self.params.get('progress_hooks', []):
        self.add_progress_hook(ph)

    register_socks_protocols()
def warn_if_short_id(self, argv):
    """Warn when a bare 11-char video id on the command line looks like an option."""
    short_id_idxs = [
        i for i, a in enumerate(argv)
        if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
    if not short_id_idxs:
        return
    # Suggest the same command with '--' separating options from the ids
    correct_argv = (
        ['yt-dlp']
        + [a for i, a in enumerate(argv) if i not in short_id_idxs]
        + ['--'] + [argv[i] for i in short_id_idxs]
    )
    self.report_warning(
        'Long argument string detected. '
        'Use -- to separate parameters and URLs, like this:\n%s\n' %
        args_to_str(correct_argv))
def add_info_extractor(self, ie):
    """Add an InfoExtractor object (or lazily-instantiated class) to the end of the list."""
    self._ies.append(ie)
    if isinstance(ie, type):
        return  # a class: instantiated (and wired up) on first use
    self._ies_instances[ie.ie_key()] = ie
    ie.set_downloader(self)
def get_info_extractor(self, ie_key):
    """Return (instantiating and registering on first use) the IE for *ie_key*."""
    cached = self._ies_instances.get(ie_key)
    if cached is None:
        cached = get_info_extractor(ie_key)()
        self.add_info_extractor(cached)
    return cached
def add_default_info_extractors(self):
    """Register all the built-in extractors, in order of priority."""
    for extractor in gen_extractor_classes():
        self.add_info_extractor(extractor)
def add_post_processor(self, pp, when='post_process'):
    """Register a postprocessor for the given pipeline stage and link it back."""
    self._pps[when].append(pp)
    pp.set_downloader(self)
def add_post_hook(self, ph):
    """Register a callable invoked with the final filepath after each download."""
    self._post_hooks.append(ph)
def add_progress_hook(self, ph):
    """Register a callable that receives download-progress status dicts."""
    self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
    """Pass *message* through the bidi helper subprocess, when one was set up."""
    if not hasattr(self, '_output_channel'):
        return message  # --bidi-workaround not active

    assert hasattr(self, '_output_process')
    assert isinstance(message, compat_str)
    num_lines = message.count('\n') + 1
    self._output_process.stdin.write((message + '\n').encode('utf-8'))
    self._output_process.stdin.flush()
    converted = ''.join(self._output_channel.readline().decode('utf-8')
                        for _ in range(num_lines))
    # Drop the trailing newline we appended above
    return converted[:-len('\n')]
def _write_string(self, message, out=None, only_once=False):
    """Low-level write with optional de-duplication of repeated messages."""
    if only_once:
        if message in self._printed_messages:
            return  # already emitted once; suppress the duplicate
        self._printed_messages.add(message)
    write_string(message, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, quiet=False):
    """Print message to stdout (honouring --quiet and any configured logger)."""
    logger = self.params.get('logger')
    if logger:
        logger.debug(message)
    elif not quiet or self.params.get('verbose'):
        terminator = '' if skip_eol else '\n'
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), terminator),
            self._err_file if quiet else self._screen_file)
def to_stderr(self, message, only_once=False):
    """Print message to stderr (or hand it to the configured logger)."""
    assert isinstance(message, compat_str)
    logger = self.params.get('logger')
    if logger:
        logger.error(message)
    else:
        self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
def to_console_title(self, message):
    """Set the console/terminal window title, when --console-title is enabled."""
    if not self.params.get('consoletitle', False):
        return
    if compat_os_name == 'nt':
        # Only works when an actual console window exists
        if ctypes.windll.kernel32.GetConsoleWindow():
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
    elif 'TERM' in os.environ:
        self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
    """Push the current terminal title onto the xterm title stack."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate'):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # Save the title on stack
        self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
    """Pop the previously saved terminal title off the xterm title stack."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate'):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # Restore the title from stack
        self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
    # Context-manager entry: stash the terminal title so __exit__ can restore it
    self.save_console_title()
    return self
def __exit__(self, *args):
    self.restore_console_title()
    if self.params.get('cookiefile') is not None:
        # Persist session and expired cookies as well (ignore_* keeps them)
        self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None):
    """Report an error and decide whether to abort.

    message -- message to print to stderr (may be None)
    tb -- traceback text; when None and --verbose is set, it is rebuilt
          from the active exception (or the current stack if none)

    With --ignore-errors the return code is set to 1 and execution
    continues; otherwise a DownloadError wrapping the current exc_info
    is raised.
    """
    if message is not None:
        self.to_stderr(message)
    if self.params.get('verbose'):
        if tb is None:
            if sys.exc_info()[0]:  # called from an except block
                tb = ''
                # Some wrappers (e.g. ExtractorError) carry the original exc_info
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += encode_compat_str(traceback.format_exc())
            else:
                # No active exception: show where we were called from
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = ''.join(tb_data)
        if tb:
            self.to_stderr(tb)
    if not self.params.get('ignoreerrors', False):
        # Prefer the wrapped original exc_info when available
        if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
            exc_info = sys.exc_info()[1].exc_info
        else:
            exc_info = sys.exc_info()
        raise DownloadError(message, exc_info)
    self._download_retcode = 1
def to_screen(self, message, skip_eol=False):
    """Print message to screen, respecting the --quiet option."""
    quiet = self.params.get('quiet', False)
    self.to_stdout(message, skip_eol, quiet=quiet)
def report_warning(self, message, only_once=False):
    """Print *message* to stderr prefixed with 'WARNING:' (colored on a tty)."""
    logger = self.params.get('logger')
    if logger is not None:
        logger.warning(message)
        return
    if self.params.get('no_warnings'):
        return
    use_color = (not self.params.get('no_color')
                 and self._err_file.isatty()
                 and compat_os_name != 'nt')
    _msg_header = '\033[0;33mWARNING:\033[0m' if use_color else 'WARNING:'
    self.to_stderr('%s %s' % (_msg_header, message), only_once)
def report_error(self, message, tb=None):
    """Like trouble(), but prefixes the message with 'ERROR:' (red on a tty)."""
    use_color = (not self.params.get('no_color')
                 and self._err_file.isatty()
                 and compat_os_name != 'nt')
    _msg_header = '\033[0;31mERROR:\033[0m' if use_color else 'ERROR:'
    self.trouble('%s %s' % (_msg_header, message), tb)
def write_debug(self, message, only_once=False):
if not self.params.get('verbose', False):
return
message = '[debug] %s' % message
if self.params.get('logger'):
self.params['logger'].debug(message)
else:
self.to_stderr(message, only_once)
def report_file_already_downloaded(self, file_name):
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_file_delete(self, file_name):
try:
self.to_screen('Deleting existing file %s' % file_name)
except UnicodeEncodeError:
self.to_screen('Deleting existing file')
def parse_outtmpl(self):
outtmpl_dict = self.params.get('outtmpl', {})
if not isinstance(outtmpl_dict, dict):
outtmpl_dict = {'default': outtmpl_dict}
outtmpl_dict.update({
k: v for k, v in DEFAULT_OUTTMPL.items()
if not outtmpl_dict.get(k)})
for key, val in outtmpl_dict.items():
if isinstance(val, bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
return outtmpl_dict
def get_output_path(self, dir_type='', filename=None):
paths = self.params.get('paths', {})
assert isinstance(paths, dict)
path = os.path.join(
expand_path(paths.get('home', '').strip()),
expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
filename or '')
if sys.version_info < (3, 0) and sys.platform == 'win32':
path = encodeFilename(path, True).decode(preferredencoding())
return sanitize_path(path, force=self.params.get('windowsfilenames'))
@staticmethod
def _outtmpl_expandpath(outtmpl):
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# because meta fields may contain env variables we don't want to
return expand_path(outtmpl).replace(sep, '')
    @staticmethod
    def escape_outtmpl(outtmpl):
        """Escape any remaining bare '%' (e.g. '%s', '%abc%') so that the template
        can safely be %-formatted; already-keyed fields are left untouched."""
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            # keep keyed fields as-is, double the '%' of everything else
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)
@classmethod
def validate_outtmpl(cls, outtmpl):
outtmpl = re.sub(
STR_FORMAT_RE_TMPL.format('[^)]*', '[ljq]'),
lambda mobj: f'{mobj.group(0)[:-1]}s',
cls._outtmpl_expandpath(outtmpl))
try:
cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
return None
except ValueError as err:
return err
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """Make the template and info_dict suitable for substitution:
        ydl.escape_outtmpl(outtmpl) % tmpl_dict.

        Returns a (transformed outtmpl, TMPL_DICT) pair; every %(...)X field in
        the template is rewritten to a unique key of TMPL_DICT holding the
        already-computed (and optionally sanitized) value.
        """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set
        info_dict = dict(info_dict)  # Do not sanitize so as not to consume LazyList
        # Internal bookkeeping keys must not leak into templates
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)
        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
            'autonumber': self.params.get('autonumber_size') or 5,
        }
        TMPL_DICT = {}
        # Matches the %-style fields (plus custom [ljq] conversions) in the template
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljq]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        # Parses the inside of a field: -field+offset>strftime|default
        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
            (?P<negate>-)?
            (?P<fields>{field})
            (?P<maths>(?:{math_op}{math_field})*)
            (?:>(?P<strf_format>.+?))?
            (?:\|(?P<default>.*?))?
            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
        def _traverse_infodict(k):
            # 'a.b.c' -> nested lookup in info_dict; a leading '.' is tolerated
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
        def get_value(mdict):
            # Resolve one parsed field (groupdict of INTERNAL_FORMAT_RE) to its value
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                # Alternate between reading an operator and reading an operand
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        # Operand may itself be an info_dict field
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'])
            return value
        na = self.params.get('outtmpl_na_placeholder', 'NA')
        def _dumpjson_default(obj):
            # json serializer fallback for the 'j' conversion
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')
        def create_key(outer_mobj):
            # re.sub callback: replace each template field with a unique TMPL_DICT key
            if not outer_mobj.group('has_key'):
                return f'%{outer_mobj.group(0)}'
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            if mobj is None:
                value, default, mobj = None, na, {'fields': ''}
            else:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else na
                value = get_value(mobj)
            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                # backward-compat zero-padding for playlist_index/autonumber
                fmt = '0{:d}d'.format(field_size_compat_map[key])
            value = default if value is None else value
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':
                value, fmt = ', '.join(variadic(value)), str_fmt
            elif fmt[-1] == 'j':
                value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
            elif fmt[-1] == 'q':
                value, fmt = compat_shlex_quote(str(value)), str_fmt
            elif fmt[-1] == 'c':
                value = str(value)
                # NOTE(review): str(value) can never be None, so this fallback
                # looks unreachable — the None check probably belongs before
                # the str() call; confirm intent
                if value is None:
                    value, fmt = default, 's'
                else:
                    value = value[0]
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'
            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitize(mobj['fields'].split('.')[-1], value)
            # '\0' makes the generated key collision-free with user-typed fields
            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
    def _prepare_filename(self, info_dict, tmpl_type='default'):
        """Render the output template of type *tmpl_type* against *info_dict*.

        Returns the resulting filename, or None if the template is invalid
        (the error is reported via report_error).
        """
        try:
            # Per-field sanitizer; ids get the stricter is_id treatment
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
            outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
            outtmpl = self.escape_outtmpl(self._outtmpl_expandpath(outtmpl))
            filename = outtmpl % template_dict
            # Some template types (e.g. infojson, description) force an extension
            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if force_ext is not None:
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))
            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                # Trim the stem but keep (sub-)extensions intact
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
def prepare_filename(self, info_dict, dir_type='', warn=False):
filename = self._prepare_filename(info_dict, dir_type or 'default')
if warn:
if not self.params.get('paths'):
pass
elif filename == '-':
self.report_warning('--paths is ignored when an outputting to stdout', only_once=True)
elif os.path.isabs(filename):
self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
if filename == '-' or not filename:
return filename
return self.get_output_path(dir_type, filename)
    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Return None if the video should be downloaded, otherwise a string
        explaining why it was skipped.

        May raise ExistingVideoReached / RejectedVideoReached when the
        corresponding break_on_* option is set.
        """
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        def check_filter():
            # Returns a skip reason (str) or None if all filters pass
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title
            if not incomplete:
                # User-supplied --match-filter function, only on full info
                match_filter = self.params.get('match_filter')
                if match_filter is not None:
                    ret = match_filter(info_dict)
                    if ret is not None:
                        return ret
            return None
        # Archive hits and filter rejections trigger different break options
        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
@staticmethod
def add_extra_info(info_dict, extra_info):
for key, value in extra_info.items():
info_dict.setdefault(key, value)
    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True, force_generic_extractor=False):
        """
        Extract and return the info dict for *url* via the first suitable
        extractor.

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key; use only the listed extractor
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
            must be True for download to work
        force_generic_extractor -- force using the generic extractor
        """
        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'
        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies
        for ie in ies:
            if not ie.suitable(url):
                continue
            ie_key = ie.ie_key()
            ie = self.get_info_extractor(ie_key)
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')
            # Cheap pre-check against the download archive using only the id,
            # so we can skip without doing a full extraction
            try:
                temp_id = str_or_none(
                    ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
                    else ie._match_id(url))
            except (AssertionError, IndexError, AttributeError):
                temp_id = None
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen("[%s] %s: has already been recorded in archive" % (
                    ie_key, temp_id))
                # NOTE: break skips the for-else below, so None is returned
                # for already-archived ids
                break
            return self.__extract_info(url, ie, download, extra_info, process)
        else:
            # for-else: no extractor accepted the URL
            self.report_error('no suitable InfoExtractor for URL %s' % url)
    def __handle_extraction_exceptions(func, handle_all_errors=True):
        # Decorator for YoutubeDL methods: turns extraction exceptions into
        # user-facing reports, honouring 'ignoreerrors', and transparently
        # retries when the download was throttled.
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except ThrottledDownload:
                # '\r' clears the current progress line before re-reporting
                self.to_stderr('\r')
                self.report_warning('The download speed is below throttle limit. Re-extracting data')
                # Retry by re-entering the wrapper (recursion, no retry limit)
                return wrapper(self, *args, **kwargs)
            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                # Control-flow exceptions must propagate to the caller
                raise
            except Exception as e:
                if handle_all_errors and self.params.get('ignoreerrors', False):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
ie_result = ie.extract(url)
if ie_result is None:
return
if isinstance(ie_result, list):
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
if extra_info.get('original_url'):
ie_result.setdefault('original_url', extra_info['original_url'])
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
def add_default_extra_info(self, ie_result, ie, url):
if url is not None:
self.add_extra_info(ie_result, {
'webpage_url': url,
'original_url': url,
'webpage_url_basename': url_basename(url),
})
if ie is not None:
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'extractor_key': ie.ie_key(),
})
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download' is true.
        Returns the resolved ie_result.

        NOTE(review): extra_info has a mutable default and is mutated below
        via setdefault — state can leak between calls that rely on the
        default; confirm this is intentional.
        """
        result_type = ie_result.get('_type', 'video')
        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                # Flat extraction: just print/report the bare result, no recursion
                info_copy = ie_result.copy()
                self.add_extra_info(info_copy, extra_info)
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                return ie_result
        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # Extractors/postprocessors may request extra URLs to be extracted
                if isinstance(additional_urls, compat_str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)
            # extract_info may return None (e.g. with ignoreerrors); bail out
            # in this case
            if not info:
                return info
            # Outer metadata wins over the embedded page's, except structural keys
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)
            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'
            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                # NOTE(review): '%' binds tighter than 'or', so the
                # "or ie_result.get('id')" fallback never applies here (the
                # formatted string is always truthy) — confirm intent
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return
            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))
            def _fixup(r):
                # Propagate the playlist-level metadata onto each entry
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
    def _ensure_dir_exists(self, path):
        # Create the directory containing `path`; failures go through report_error
        return make_dir(path, self.report_error)
def __process_playlist(self, ie_result, download):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
if 'entries' not in ie_result:
raise EntryNotInPlaylist()
incomplete_entries = bool(ie_result.get('requested_entries'))
if incomplete_entries:
def fill_missing_entries(entries, indexes):
ret = [None] * max(*indexes)
for i, entry in zip(indexes, entries):
ret[i - 1] = entry
return ret
ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
playlist_results = []
playliststart = self.params.get('playliststart', 1)
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
ie_entries = ie_result['entries']
msg = (
'Downloading %d videos' if not isinstance(ie_entries, list)
else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
if not isinstance(ie_entries, (list, PagedList)):
ie_entries = LazyList(ie_entries)
def get_entry(i):
return YoutubeDL.__handle_extraction_exceptions(
lambda self, i: ie_entries[i - 1],
False
)(self, i)
entries = []
for i in playlistitems or itertools.count(playliststart):
if playlistitems is None and playlistend is not None and playlistend < i:
break
entry = None
try:
entry = get_entry(i)
if entry is None:
raise EntryNotInPlaylist()
except (IndexError, EntryNotInPlaylist):
if incomplete_entries:
raise EntryNotInPlaylist()
elif not playlistitems:
break
entries.append(entry)
try:
if entry is not None:
self._match_entry(entry, incomplete=True, silent=True)
except (ExistingVideoReached, RejectedVideoReached):
break
ie_result['entries'] = entries
# Save playlist_index before re-ordering
entries = [
((playlistitems[i - 1] if playlistitems else i), entry)
for i, entry in enumerate(entries, 1)
if entry is not None]
n_entries = len(entries)
if not playlistitems and (playliststart or playlistend):
playlistitems = list(range(playliststart, playliststart + n_entries))
ie_result['requested_entries'] = playlistitems
if self.params.get('allow_playlist_files', True):
ie_copy = {
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': 0,
}
ie_copy.update(dict(ie_result))
if self.params.get('writeinfojson', False):
infofn = self.prepare_filename(ie_copy, 'pl_infojson')
if not self._ensure_dir_exists(encodeFilename(infofn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Playlist metadata is already present')
else:
self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
try:
write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
except (OSError, IOError):
self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
# TODO: This should be passed to ThumbnailsConvertor if necessary
self._write_thumbnails(ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
if self.params.get('writedescription', False):
descfn = self.prepare_filename(ie_copy, 'pl_description')
if not self._ensure_dir_exists(encodeFilename(descfn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Playlist description is already present')
elif ie_result.get('description') is None:
self.report_warning('There\'s no playlist description to write.')
else:
try:
self.to_screen('[info] Writing playlist description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(ie_result['description'])
except (OSError, IOError):
self.report_error('Cannot write playlist description file ' + descfn)
return
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
failures = 0
max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
for i, entry_tuple in enumerate(entries, 1):
playlist_index, entry = entry_tuple
if 'playlist_index' in self.params.get('compat_options', []):
playlist_index = playlistitems[i - 1] if playlistitems else i
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
'playlist_index': playlist_index,
'playlist_autonumber': i,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
if self._match_entry(entry, incomplete=True) is not None:
continue
entry_result = self.__process_iterable_entry(entry, download, extra)
if not entry_result:
failures += 1
if failures >= max_failures:
self.report_error(
'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
break
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
    @__handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        # Per-entry wrapper so that extraction errors in one playlist entry are
        # reported (per __handle_extraction_exceptions) without aborting the rest
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        """Return a predicate that filters format dicts according to *filter_spec*
        (e.g. 'height<=720', 'ext=mp4', 'acodec!^=mp4a', with optional '?' to
        accept formats where the field is missing)."""
        # Numeric comparisons on known numeric fields
        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                # Not a plain int: try to parse as a filesize ('500K', '1.2MiB', ...)
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]
        if not m:
            # String comparisons on arbitrary fields, with optional '!' negation
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
                (?P<value>[a-zA-Z0-9._-]+)\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                comparison_value = m.group('value')
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op
        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)
        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                # NOTE(review): returns the raw 'none_inclusive' group — the
                # matched '?' text (truthy) or None (falsy) — not a bool;
                # callers appear to use it only for truthiness
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
def _default_format_spec(self, info_dict, download=True):
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
prefer_best = (
not self.params.get('simulate')
and download
and (
not can_merge()
or info_dict.get('is_live', False)
or self.outtmpl_dict['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
or 'format-spec' in self.params.get('compat_opts', []))
return (
'best/bestvideo+bestaudio' if prefer_best
else 'bestvideo*+bestaudio/best' if not compat
else 'bestvideo+bestaudio/best')
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
'video': self.params.get('allow_multiple_video_streams', False)}
check_formats = self.params.get('check_formats')
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
if not current_selector:
raise syntax_error('Unexpected "+"', start)
selector_1 = current_selector
selector_2 = _parse_format_selection(tokens, inside_merge=True)
if not selector_2:
raise syntax_error('Expected a selector', start)
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _merge(formats_pair):
format_1, format_2 = formats_pair
formats_info = []
formats_info.extend(format_1.get('requested_formats', (format_1,)))
formats_info.extend(format_2.get('requested_formats', (format_2,)))
if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
get_no_more = {'video': False, 'audio': False}
for (i, fmt_info) in enumerate(formats_info):
if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
formats_info.pop(i)
continue
for aud_vid in ['audio', 'video']:
if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
if get_no_more[aud_vid]:
formats_info.pop(i)
break
get_no_more[aud_vid] = True
if len(formats_info) == 1:
return formats_info[0]
video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
output_ext = self.params.get('merge_output_format')
if not output_ext:
if the_only_video:
output_ext = the_only_video['ext']
elif the_only_audio and not video_fmts:
output_ext = the_only_audio['ext']
else:
output_ext = 'mkv'
new_dict = {
'requested_formats': formats_info,
'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
'ext': output_ext,
}
if the_only_video:
new_dict.update({
'width': the_only_video.get('width'),
'height': the_only_video.get('height'),
'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
'fps': the_only_video.get('fps'),
'vcodec': the_only_video.get('vcodec'),
'vbr': the_only_video.get('vbr'),
'stretched_ratio': the_only_video.get('stretched_ratio'),
})
if the_only_audio:
new_dict.update({
'acodec': the_only_audio.get('acodec'),
'abr': the_only_audio.get('abr'),
})
return new_dict
        def _check_formats(formats):
            """Yield only the formats that are actually downloadable.

            When the enclosing scope's ``check_formats`` is falsy this is a
            pass-through; otherwise each format is probed with a test download
            into a throwaway temp file, which is removed afterwards.
            """
            if not check_formats:
                yield from formats
                return
            for f in formats:
                self.to_screen('[info] Testing format %s' % f['format_id'])
                # delete=False so the downloader can reopen the path by name.
                temp_file = tempfile.NamedTemporaryFile(
                    suffix='.tmp', delete=False,
                    dir=self.get_output_path('temp') or None)
                temp_file.close()
                try:
                    success, _ = self.dl(temp_file.name, f, test=True)
                except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
                    success = False
                finally:
                    if os.path.exists(temp_file.name):
                        try:
                            os.remove(temp_file.name)
                        except OSError:
                            self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
                if success:
                    yield f
                else:
                    self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
        def _build_selector_function(selector):
            """Recursively turn a parsed format-selector tree into a function
            ``ctx -> iterator of format dicts``.

            ``selector`` is either a list (comma-separated alternatives) or a
            parsed node whose ``type`` is GROUP, PICKFIRST, MERGE or SINGLE.
            """
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    # Comma: concatenate the results of every sub-selector.
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    # Slash: first sub-selector that yields anything wins.
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    # Plus: merge every pairing of the two sides; deepcopy so
                    # each side filters an independent copy of the context.
                    for pair in itertools.product(
                            selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'
                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(ctx['formats']))
                        if not formats:
                            return
                        # Fold all formats into one merged pseudo-format,
                        # starting from the best (last) one.
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, format_reverse, format_idx = False, True, 1
                    # Parse specs like best, worst, bv*, ba.2, wa* ...
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None
                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        # Always exclude formats carrying no stream at all.
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        # Not a best/worst spec: match by extension or format_id.
                        filter_f = ((lambda f: f.get('ext') == format_spec)
                                    if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']  # extension
                                    else (lambda f: f.get('format_id') == format_spec))  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats'] and not matches:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = formats
                        # LazyList so _check_formats only probes as many
                        # formats as needed to reach format_idx.
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except IndexError:
                            return

            # Apply the selector's attached [filter] clauses on a copy of ctx.
            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = copy.deepcopy(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
    def _sanitize_thumbnails(self, info_dict):
        """Normalize info_dict['thumbnails'] in place.

        Promotes a bare 'thumbnail' field to a one-entry list, sorts by
        preference/size, assigns missing ids, fills 'resolution', sanitizes
        URLs and — unless 'check_formats' is explicitly False — lazily drops
        thumbnails whose URL is unreachable.
        """
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            # Sort worst-to-best so the id assigned below reflects rank.
            thumbnails.sort(key=lambda t: (
                t.get('preference') if t.get('preference') is not None else -1,
                t.get('width') if t.get('width') is not None else -1,
                t.get('height') if t.get('height') is not None else -1,
                t.get('id') if t.get('id') is not None else '',
                t.get('url')))

            def thumbnail_tester():
                # With check_formats, probe every thumbnail and be verbose;
                # otherwise only probe those flagged '_test_url' and log at
                # debug level.
                if self.params.get('check_formats'):
                    test_all = True
                    to_screen = lambda msg: self.to_screen(f'[info] {msg}')
                else:
                    test_all = False
                    to_screen = self.write_debug

                def test_thumbnail(t):
                    if not test_all and not t.get('_test_url'):
                        return True
                    to_screen('Testing thumbnail %s' % t['id'])
                    try:
                        # HEAD request: existence check without downloading.
                        self.urlopen(HEADRequest(t['url']))
                    except network_exceptions as err:
                        to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
                            t['id'], t['url'], error_to_compat_str(err)))
                        return False
                    return True

                return test_thumbnail

            for i, t in enumerate(thumbnails):
                if t.get('id') is None:
                    t['id'] = '%d' % i
                if t.get('width') and t.get('height'):
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                t['url'] = sanitize_url(t['url'])

            if self.params.get('check_formats') is not False:
                # Test best-first (reversed) but keep worst-to-best order;
                # LazyList defers the network probes until actually iterated.
                info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
            else:
                info_dict['thumbnails'] = thumbnails
    def process_video_result(self, info_dict, download=True):
        """Sanitize a single-video extractor result, select the requested
        formats and (when *download* is true) hand each one to process_info.

        Mutates and returns *info_dict*; raises ExtractorError for missing
        mandatory fields or when no format matches the request.
        """
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        def report_force_conversion(field, field_not, conversion):
            # Warn: such coercions indicate an extractor bug upstream.
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            # Coerce a non-string field to str, warning about it.
            field = info.get(string_field)
            if field is None or isinstance(field, compat_str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = compat_str(field)

        def sanitize_numeric_fields(info):
            # Coerce every known numeric field to int, warning about it.
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, compat_numeric_types):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            # Thumbnails are sorted worst-to-best, so take the last (best).
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        # Derive YYYYMMDD date fields from their timestamp counterparts.
        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                try:
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
                except (ValueError, OverflowError, OSError):
                    # Out-of-range timestamps (e.g. negative on Windows).
                    pass

        # Reconcile live_status with the is_live/was_live booleans.
        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                    break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)

        # Auto-generate title fields from *_number fields when missing.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

        # Sanitize subtitle/caption URLs and fill in missing extensions.
        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        # No 'formats' key means the info_dict itself is the only format.
        if info_dict.get('formats') is None:
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError('No video formats found!')
            else:
                self.report_warning('No video formats found!')

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats))

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                format['format_id'] = compat_str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        for format_id, ambiguous_formats in formats_dict.items():
            if len(ambiguous_formats) > 1:
                for i, format in enumerate(ambiguous_formats):
                    format['format_id'] = '%s-%d' % (format_id, i)

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=format_field(format, 'format_note', ' (%s)'),
                )
            # Automatically determine file extension if missing
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            # Automatically determine protocol if missing (useful for format
            # selection purposes)
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        # TODO Central sorting goes here

        if formats and formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        # Listing modes: print the requested tables instead of downloading.
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listformats'):
            if not info_dict.get('formats') and not info_dict.get('url'):
                raise ExtractorError('No video formats found', expected=True)
            self.list_formats(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        list_only = self.params.get('simulate') is None and (
            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
        if list_only:
            # Forced printings must still happen in list-only mode.
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        # True when every format is video-only, or every format audio-only —
        # lets best/worst fall back to single-stream formats.
        incomplete_formats = (
            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
            or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

        ctx = {
            'formats': formats,
            'incomplete_formats': incomplete_formats,
        }

        formats_to_download = list(format_selector(ctx))
        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError('Requested format is not available', expected=True)
            else:
                self.report_warning('Requested format is not available')
                # Process what we can, even without any available formats.
                self.process_info(dict(info_dict))
        elif download:
            self.to_screen(
                '[info] %s: Downloading %d format(s): %s' % (
                    info_dict['id'], len(formats_to_download),
                    ", ".join([f['format_id'] for f in formats_to_download])))
            for fmt in formats_to_download:
                new_info = dict(info_dict)
                # Keep a reference to the original so process_info can update it.
                new_info['__original_infodict'] = info_dict
                new_info.update(fmt)
                self.process_info(new_info)
        # Merge the last (best) selected format back for backward compatibility.
        if formats_to_download:
            info_dict.update(formats_to_download[-1])
        return info_dict
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select which subtitles to download and in what format.

        Returns a dict {lang: subtitle_format_dict} honouring the
        writesubtitles / writeautomaticsub / subtitleslangs /
        subtitlesformat params, or None when nothing is requested/available.
        """
        available_subs = {}
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
        if automatic_captions and self.params.get('writeautomaticsub'):
            # Manual subtitles take precedence over automatic captions.
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if (not self.params.get('writesubtitles') and not
                self.params.get('writeautomaticsub') or not
                available_subs):
            return None

        all_sub_langs = available_subs.keys()
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            requested_langs = set()
            for lang in self.params.get('subtitleslangs'):
                if lang == 'all':
                    requested_langs.update(all_sub_langs)
                    continue
                # A leading '-' removes matching languages from the set;
                # each entry is treated as an anchored regex (lang + '$').
                discard = lang[0] == '-'
                if discard:
                    lang = lang[1:]
                current_langs = filter(re.compile(lang + '$').match, all_sub_langs)
                if discard:
                    for lang in current_langs:
                        requested_langs.discard(lang)
                else:
                    requested_langs.update(current_langs)
        elif 'en' in available_subs:
            # Default: prefer English, else an arbitrary available language.
            requested_langs = ['en']
        else:
            requested_langs = [list(all_sub_langs)[0]]
        if requested_langs:
            self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning('%s subtitles not available for %s' % (lang, video_id))
                continue
            # First preferred extension that matches wins; the for/else falls
            # back to the best (last) format with a warning.
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
    def __forced_printings(self, info_dict, filename, incomplete):
        """Emit the stdout output requested by force* params.

        *incomplete* marks partially-populated info_dicts (e.g. list-only
        mode), in which case mandatory fields are only printed when present.
        """
        def print_mandatory(field, actual_field=None):
            if actual_field is None:
                actual_field = field
            if (self.params.get('force%s' % field, False)
                    and (not incomplete or info_dict.get(actual_field) is not None)):
                self.to_stdout(info_dict[actual_field])

        def print_optional(field):
            if (self.params.get('force%s' % field, False)
                    and info_dict.get(field) is not None):
                self.to_stdout(info_dict[field])

        # Work on a copy: the synthetic 'filename'/'urls' fields below must
        # not leak into the caller's info_dict.
        info_dict = info_dict.copy()
        if filename is not None:
            info_dict['filename'] = filename
        if info_dict.get('requested_formats') is not None:
            # 'play_path' is appended for RTMP-style URLs when present.
            info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif 'url' in info_dict:
            info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

        if self.params.get('forceprint') or self.params.get('forcejson'):
            self.post_extract(info_dict)
        for tmpl in self.params.get('forceprint', []):
            # A bare field name is shorthand for the '%(field)s' template.
            if re.match(r'\w+$', tmpl):
                tmpl = '%({})s'.format(tmpl)
            tmpl, info_copy = self.prepare_outtmpl(tmpl, info_dict)
            self.to_stdout(self.escape_outtmpl(tmpl) % info_copy)

        print_mandatory('title')
        print_mandatory('id')
        print_mandatory('url', 'urls')
        print_optional('thumbnail')
        print_optional('description')
        print_optional('filename')
        if self.params.get('forceduration') and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        print_mandatory('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
    def dl(self, name, info, subtitle=False, test=False):
        """Invoke a suitable file downloader for *info*, saving to *name*.

        With test=True a minimal, quiet parameter set is used (no .part
        files, no progress hooks) — used by format checking.
        Returns the downloader's (success, real_download) result.
        """
        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
        urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
        self.write_debug('Invoking downloader on "%s"' % urls)
        # Work on a copy so the header injection below does not leak back.
        new_info = dict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
def process_info(self, info_dict):
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
info_dict['fulltitle'] = info_dict['title']
if 'format' not in info_dict and 'ext' in info_dict:
info_dict['format'] = info_dict['ext']
if self._match_entry(info_dict) is not None:
return
self.post_extract(info_dict)
self._num_downloads += 1
info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
temp_filename = self.prepare_filename(info_dict, 'temp')
files_to_move = {}
self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
if self.params.get('simulate'):
if self.params.get('force_write_download_archive', False):
self.record_download_archive(info_dict)
return
if full_filename is None:
return
if not self._ensure_dir_exists(encodeFilename(full_filename)):
return
if not self._ensure_dir_exists(encodeFilename(temp_filename)):
return
if self.params.get('writedescription', False):
descfn = self.prepare_filename(info_dict, 'description')
if not self._ensure_dir_exists(encodeFilename(descfn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = self.prepare_filename(info_dict, 'annotation')
if not self._ensure_dir_exists(encodeFilename(annofn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
elif not info_dict.get('annotations'):
self.report_warning('There are no annotations to write.')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE
subtitles = info_dict['requested_subtitles']
# ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(temp_filename, sub_lang, sub_format, info_dict.get('ext'))
sub_filename_final = subtitles_filename(
self.prepare_filename(info_dict, 'subtitle'), sub_lang, sub_format, info_dict.get('ext'))
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
sub_info['filepath'] = sub_filename
files_to_move[sub_filename] = sub_filename_final
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
sub_info['filepath'] = sub_filename
files_to_move[sub_filename] = sub_filename_final
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
else:
try:
self.dl(sub_filename, sub_info.copy(), subtitle=True)
sub_info['filepath'] = sub_filename
files_to_move[sub_filename] = sub_filename_final
except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err)))
continue
if self.params.get('writeinfojson', False):
infofn = self.prepare_filename(info_dict, 'infojson')
if not self._ensure_dir_exists(encodeFilename(infofn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video metadata is already present')
else:
self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
try:
write_json_file(self.sanitize_info(info_dict, self.params.get('clean_infojson', True)), infofn)
except (OSError, IOError):
self.report_error('Cannot write video metadata to JSON file ' + infofn)
return
info_dict['__infojson_filename'] = infofn
for thumb_ext in self._write_thumbnails(info_dict, temp_filename):
thumb_filename_temp = replace_extension(temp_filename, thumb_ext, info_dict.get('ext'))
thumb_filename = replace_extension(
self.prepare_filename(info_dict, 'thumbnail'), thumb_ext, info_dict.get('ext'))
files_to_move[thumb_filename_temp] = thumb_filename
# Write internet shortcut files
url_link = webloc_link = desktop_link = False
if self.params.get('writelink', False):
if sys.platform == "darwin": # macOS.
webloc_link = True
elif sys.platform.startswith("linux"):
desktop_link = True
else: # if sys.platform in ['win32', 'cygwin']:
url_link = True
if self.params.get('writeurllink', False):
url_link = True
if self.params.get('writewebloclink', False):
webloc_link = True
if self.params.get('writedesktoplink', False):
desktop_link = True
if url_link or webloc_link or desktop_link:
if 'webpage_url' not in info_dict:
self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
return
ascii_url = iri_to_uri(info_dict['webpage_url'])
def _write_link_file(extension, template, newline, embed_filename):
linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
self.to_screen('[info] Internet shortcut is already present')
else:
try:
self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
template_vars = {'url': ascii_url}
if embed_filename:
template_vars['filename'] = linkfn[:-(len(extension) + 1)]
linkfile.write(template % template_vars)
except (OSError, IOError):
self.report_error('Cannot write internet shortcut ' + linkfn)
return False
return True
if url_link:
if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
return
if webloc_link:
if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
return
if desktop_link:
if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
return
try:
info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
except PostProcessingError as err:
self.report_error('Preprocessing: %s' % str(err))
return
must_record_download_archive = False
if self.params.get('skip_download', False):
info_dict['filepath'] = temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
info_dict['__files_to_move'] = files_to_move
info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
else:
# Download
info_dict.setdefault('__postprocessors', [])
try:
def existing_file(*filepaths):
ext = info_dict.get('ext')
final_ext = self.params.get('final_ext', ext)
existing_files = []
for file in orderedSet(filepaths):
if final_ext != ext:
converted = replace_extension(file, final_ext, ext)
if os.path.exists(encodeFilename(converted)):
existing_files.append(converted)
if os.path.exists(encodeFilename(file)):
existing_files.append(file)
if not existing_files or self.params.get('overwrites', False):
for file in orderedSet(existing_files):
self.report_file_delete(file)
os.remove(encodeFilename(file))
return None
info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
return existing_files[0]
success = True
if info_dict.get('requested_formats') is not None:
def compatible_formats(formats):
# TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
video_formats = [format for format in formats if format.get('vcodec') != 'none']
audio_formats = [format for format in formats if format.get('acodec') != 'none']
if len(video_formats) > 2 or len(audio_formats) > 2:
return False
# Check extension
exts = set(format.get('ext') for format in formats)
COMPATIBLE_EXTS = (
set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
set(('webm',)),
)
for ext_sets in COMPATIBLE_EXTS:
if ext_sets.issuperset(exts):
return True
# TODO: Check acodec/vcodec
return False
requested_formats = info_dict['requested_formats']
old_ext = info_dict['ext']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
new_ext = info_dict['ext']
def correct_ext(filename, ext=new_ext):
if filename == '-':
return filename
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext in (old_ext, new_ext)
else filename)
return '%s.%s' % (filename_wo_ext, ext)
# Ensure filename always has a correct extension for successful merge
full_filename = correct_ext(full_filename)
temp_filename = correct_ext(temp_filename)
dl_filename = existing_file(full_filename, temp_filename)
info_dict['__real_download'] = False
_protocols = set(determine_protocol(f) for f in requested_formats)
if len(_protocols) == 1: # All requested formats have same protocol
info_dict['protocol'] = _protocols.pop()
directly_mergable = FFmpegFD.can_merge_formats(info_dict)
if dl_filename is not None:
self.report_file_already_downloaded(dl_filename)
elif (directly_mergable and get_suitable_downloader(
info_dict, self.params, to_stdout=(temp_filename == '-')) == FFmpegFD):
info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
downloaded = []
merger = FFmpegMergerPP(self)
if self.params.get('allow_unplayable_formats'):
self.report_warning(
'You have requested merging of multiple formats '
'while also allowing unplayable formats to be downloaded. '
'The formats won\'t be merged to prevent data corruption.')
elif not merger.available:
self.report_warning(
'You have requested merging of multiple formats but ffmpeg is not installed. '
'The formats won\'t be merged.')
if temp_filename == '-':
reason = ('using a downloader other than ffmpeg' if directly_mergable
else 'but the formats are incompatible for simultaneous download' if merger.available
else 'but ffmpeg is not installed')
self.report_warning(
f'You have requested downloading multiple formats to stdout {reason}. '
'The formats will be streamed one after the other')
fname = temp_filename
for f in requested_formats:
new_info = dict(info_dict)
del new_info['requested_formats']
new_info.update(f)
if temp_filename != '-':
fname = prepend_extension(
correct_ext(temp_filename, new_info['ext']),
'f%s' % f['format_id'], new_info['ext'])
if not self._ensure_dir_exists(fname):
return
downloaded.append(fname)
partial_success, real_download = self.dl(fname, new_info)
info_dict['__real_download'] = info_dict['__real_download'] or real_download
success = success and partial_success
if merger.available and not self.params.get('allow_unplayable_formats'):
info_dict['__postprocessors'].append(merger)
info_dict['__files_to_merge'] = downloaded
# Even if there were no downloads, it is being merged only now
info_dict['__real_download'] = True
else:
for file in downloaded:
files_to_move[file] = None
else:
# Just a single file
dl_filename = existing_file(full_filename, temp_filename)
if dl_filename is None or dl_filename == temp_filename:
# dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
# So we should try to resume the download
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
self.report_file_already_downloaded(dl_filename)
dl_filename = dl_filename or temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and full_filename != '-':
def fixup():
do_fixup = True
fixup_policy = self.params.get('fixup')
vid = info_dict['id']
if fixup_policy in ('ignore', 'never'):
return
elif fixup_policy == 'warn':
do_fixup = False
elif fixup_policy != 'force':
assert fixup_policy in ('detect_or_warn', None)
if not info_dict.get('__real_download'):
do_fixup = False
def ffmpeg_fixup(cndn, msg, cls):
if not cndn:
return
if not do_fixup:
self.report_warning(f'{vid}: {msg}')
return
pp = cls(self)
if pp.available:
info_dict['__postprocessors'].append(pp)
else:
self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
stretched_ratio = info_dict.get('stretched_ratio')
ffmpeg_fixup(
stretched_ratio not in (1, None),
f'Non-uniform pixel ratio {stretched_ratio}',
FFmpegFixupStretchedPP)
ffmpeg_fixup(
(info_dict.get('requested_formats') is None
and info_dict.get('container') == 'm4a_dash'
and info_dict.get('ext') == 'm4a'),
'writing DASH m4a. Only some players support this container',
FFmpegFixupM4aPP)
downloader = (get_suitable_downloader(info_dict, self.params).__name__
if 'protocol' in info_dict else None)
ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
fixup()
try:
info_dict = self.post_process(dl_filename, info_dict, files_to_move)
except PostProcessingError as err:
self.report_error('Postprocessing: %s' % str(err))
return
try:
for ph in self._post_hooks:
ph(info_dict['filepath'])
except Exception as err:
self.report_error('post hooks: %s' % str(err))
return
must_record_download_archive = True
if must_record_download_archive or self.params.get('force_write_download_archive', False):
self.record_download_archive(info_dict)
max_downloads = self.params.get('max_downloads')
if max_downloads is not None and self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
def download(self, url_list):
    """Download all the videos in *url_list* and return the retcode.

    Raises SameFileError when several URLs would be written to one
    fixed output filename.
    """
    outtmpl = self.outtmpl_dict['default']
    if (len(url_list) > 1
            and outtmpl != '-'
            and '%' not in outtmpl
            and self.params.get('max_downloads') != 1):
        # A template without any '%' field is a fixed filename: multiple
        # URLs would all overwrite the same file.
        raise SameFileError(outtmpl)
    for url in url_list:
        try:
            # It also downloads the videos
            res = self.extract_info(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached')
            raise
        except ExistingVideoReached:
            self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
            raise
        except RejectedVideoReached:
            self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.post_extract(res)
                self.to_stdout(json.dumps(self.sanitize_info(res)))
    return self._download_retcode
def download_with_info_file(self, info_filename):
    """Download the video described by a previously written info-json file.

    Falls back to re-extracting from 'webpage_url' when processing the
    stored info dict fails. Returns the download retcode.
    """
    with contextlib.closing(fileinput.FileInput(
            [info_filename], mode='r',
            openhook=fileinput.hook_encoded('utf-8'))) as f:
        # FileInput doesn't have a read method, we can't call json.load
        info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
    try:
        self.process_ie_result(info, download=True)
    except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
        webpage_url = info.get('webpage_url')
        if webpage_url is not None:
            # Retry from scratch with a fresh extraction of the original page
            self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
            return self.download([webpage_url])
        else:
            raise
    return self._download_retcode
@staticmethod
def sanitize_info(info_dict, remove_private_keys=False):
    """Sanitize *info_dict* so it can be serialized to JSON.

    When *remove_private_keys* is true, internal keys (leading
    underscore), known request-state keys and empty values are dropped
    recursively; '_type' is always preserved so that --load-info-json
    can distinguish videos from playlists.
    """
    info_dict.setdefault('epoch', int(time.time()))
    # Always remove this since it may contain a copy of the entire dict
    remove_keys = {'__original_infodict'}
    # BUG FIX: this was `keep_keys = ['_type'],` - the trailing comma made
    # it a one-element tuple containing a list, so the `k not in keep_keys`
    # test below could never match and '_type' was stripped along with the
    # other underscore-prefixed keys.
    keep_keys = {'_type'}  # Always keep this to facilitate load-info-json
    if remove_private_keys:
        remove_keys |= {
            'requested_formats', 'requested_subtitles', 'requested_entries',
            'filepath', 'entries', 'original_url', 'playlist_autonumber',
        }
        empty_values = (None, {}, [], set(), tuple())
        reject = lambda k, v: k not in keep_keys and (
            k.startswith('_') or k in remove_keys or v in empty_values)
    else:
        reject = lambda k, v: k in remove_keys
    # Walk the structure recursively, filtering every nested dict
    filter_fn = lambda obj: (
        list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
        else obj if not isinstance(obj, dict)
        else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
    return filter_fn(info_dict)
@staticmethod
def filter_requested_info(info_dict, actually_filter=True):
    """Backward-compatible alias of sanitize_info (old name, same behavior)."""
    return YoutubeDL.sanitize_info(info_dict, actually_filter)
def run_pp(self, pp, infodict):
    """Run a single postprocessor *pp* on *infodict* and return the result.

    Files the postprocessor marks for deletion are either removed, or -
    with --keepvideo - recorded in '__files_to_move' instead.
    """
    files_to_delete = []  # NOTE(review): dead initializer - immediately rebound by pp.run() below
    if '__files_to_move' not in infodict:
        infodict['__files_to_move'] = {}
    files_to_delete, infodict = pp.run(infodict)
    if not files_to_delete:
        return infodict
    if self.params.get('keepvideo', False):
        # Keep the intermediates; just make sure they are tracked for moving
        for f in files_to_delete:
            infodict['__files_to_move'].setdefault(f, '')
    else:
        for old_filename in set(files_to_delete):
            self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
            try:
                os.remove(encodeFilename(old_filename))
            except (IOError, OSError):
                self.report_warning('Unable to remove downloaded original file')
            # A deleted file no longer needs to be moved anywhere
            if old_filename in infodict['__files_to_move']:
                del infodict['__files_to_move'][old_filename]
    return infodict
@staticmethod
def post_extract(info_dict):
    """Merge the deferred '__post_extractor' fields into *info_dict*.

    Recurses into playlist/multi_video entries. The preserved
    '__original_infodict' copy is updated in the same way.
    """
    def actual_post_extract(info_dict):
        if info_dict.get('_type') in ('playlist', 'multi_video'):
            # Playlists carry no deferred fields themselves - process entries
            for video_dict in info_dict.get('entries', {}):
                actual_post_extract(video_dict or {})
            return

        post_extractor = info_dict.get('__post_extractor') or (lambda: {})
        extra = post_extractor().items()
        info_dict.update(extra)
        info_dict.pop('__post_extractor', None)

        # Keep the preserved original dict in sync as well
        original_infodict = info_dict.get('__original_infodict') or {}
        original_infodict.update(extra)
        original_infodict.pop('__post_extractor', None)

    actual_post_extract(info_dict or {})
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
    """Run all the postprocessors registered for *key* on *ie_info*.

    Returns a ``(info, files_to_move)`` pair; the caller's dict is not
    modified.
    """
    # Work on a shallow copy so the incoming dict stays untouched.
    processed = dict(ie_info)
    processed['__files_to_move'] = files_to_move or {}
    for hook in self._pps[key]:
        processed = self.run_pp(hook, processed)
    # Hand the accumulated pending moves back separately.
    pending_moves = processed.pop('__files_to_move', None)
    return processed, pending_moves
def post_process(self, filename, ie_info, files_to_move=None):
    """Run all the postprocessors on the downloaded file *filename*.

    Order: per-video '__postprocessors', then the 'post_process' stage,
    then the file-move step, then the 'after_move' stage. Returns the
    updated info dict.
    """
    info = dict(ie_info)
    info['filepath'] = filename
    info['__files_to_move'] = files_to_move or {}
    for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
        info = self.run_pp(pp, info)
    # Move files to their final destination before the 'after_move' hooks run
    info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
    del info['__files_to_move']
    for pp in self._pps['after_move']:
        info = self.run_pp(pp, info)
    return info
def _make_archive_id(self, info_dict):
    """Build the '<extractor> <id>' key used in the download archive.

    Returns None when the info dict is too incomplete to identify the
    video.
    """
    video_id = info_dict.get('id')
    if not video_id:
        return
    # Future-proof against any change in case
    # and backwards compatibility with prior versions
    extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
    if extractor is None:
        url = str_or_none(info_dict.get('url'))
        if not url:
            return
        # Try to find matching extractor for the URL and take its ie_key
        for ie in self._ies:
            if ie.suitable(url):
                extractor = ie.ie_key()
                break
        else:
            return
    return '%s %s' % (extractor.lower(), video_id)
def in_download_archive(self, info_dict):
    """Return True if this video is already recorded in the download archive."""
    archive_path = self.params.get('download_archive')
    if archive_path is None:
        # No archive configured - nothing can ever count as downloaded.
        return False
    archive_id = self._make_archive_id(info_dict)
    # No id can be built from incomplete video information.
    return bool(archive_id) and archive_id in self.archive
def record_download_archive(self, info_dict):
    """Append this video's archive id to the download-archive file.

    No-op when no archive is configured. Also updates the in-memory
    archive set so later checks see the new entry.
    """
    fn = self.params.get('download_archive')
    if fn is None:
        return
    vid_id = self._make_archive_id(info_dict)
    assert vid_id
    # locked_file serialises concurrent writers appending to the archive
    with locked_file(fn, 'a', encoding='utf-8') as archive_file:
        archive_file.write(vid_id + '\n')
    self.archive.add(vid_id)
@staticmethod
def format_resolution(format, default='unknown'):
    """Return a human-readable resolution label for a format dict.

    'images' / 'audio only' for video-less formats, an explicit
    'resolution' value when present, otherwise a label derived from
    width/height, falling back to *default*.
    """
    if format.get('vcodec') == 'none':
        # No video stream: either a picture-only or an audio-only format.
        return 'images' if format.get('acodec') == 'none' else 'audio only'
    explicit = format.get('resolution')
    if explicit is not None:
        return explicit
    width, height = format.get('width'), format.get('height')
    if width and height:
        return '%dx%d' % (width, height)
    if height:
        return '%sp' % height
    if width:
        return '%dx?' % width
    return default
def _format_note(self, fdict):
    """Build the human-readable 'note' column for a single format dict.

    The string is assembled piecewise; separators depend on what has
    already been appended, so the order of the blocks below matters.
    """
    res = ''
    if fdict.get('ext') in ['f4f', 'f4m']:
        res += '(unsupported) '
    if fdict.get('language'):
        if res:
            res += ' '
        res += '[%s] ' % fdict['language']
    if fdict.get('format_note') is not None:
        res += fdict['format_note'] + ' '
    if fdict.get('tbr') is not None:
        res += '%4dk ' % fdict['tbr']
    if fdict.get('container') is not None:
        if res:
            res += ', '
        res += '%s container' % fdict['container']
    if (fdict.get('vcodec') is not None
            and fdict.get('vcodec') != 'none'):
        if res:
            res += ', '
        res += fdict['vcodec']
        if fdict.get('vbr') is not None:
            res += '@'
    elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
        # Bitrates known but codec unknown: label the video bitrate explicitly
        res += 'video@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    if fdict.get('fps') is not None:
        if res:
            res += ', '
        res += '%sfps' % fdict['fps']
    if fdict.get('acodec') is not None:
        if res:
            res += ', '
        if fdict['acodec'] == 'none':
            res += 'video only'
        else:
            res += '%-5s' % fdict['acodec']
    elif fdict.get('abr') is not None:
        if res:
            res += ', '
        res += 'audio'
    if fdict.get('abr') is not None:
        res += '@%3dk' % fdict['abr']
    if fdict.get('asr') is not None:
        res += ' (%5dHz)' % fdict['asr']
    if fdict.get('filesize') is not None:
        if res:
            res += ', '
        res += format_bytes(fdict['filesize'])
    elif fdict.get('filesize_approx') is not None:
        if res:
            res += ', '
        # '~' marks an estimated size
        res += '~' + format_bytes(fdict['filesize_approx'])
    return res
def list_formats(self, info_dict):
    """Print the table of available formats for a video.

    Uses the newer multi-column table unless the 'list-formats' compat
    option or listformats_table=False selects the legacy 4-column one.
    Formats with preference below -1000 are hidden in both layouts.
    """
    formats = info_dict.get('formats', [info_dict])
    new_format = (
        'list-formats' not in self.params.get('compat_opts', [])
        and self.params.get('listformats_table', True) is not False)
    if new_format:
        table = [
            [
                format_field(f, 'format_id'),
                format_field(f, 'ext'),
                self.format_resolution(f),
                format_field(f, 'fps', '%d'),
                '|',
                format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
                format_field(f, 'tbr', '%4dk'),
                shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
                '|',
                # 'none' codecs are blanked out rather than printed
                format_field(f, 'vcodec', default='unknown').replace('none', ''),
                format_field(f, 'vbr', '%4dk'),
                format_field(f, 'acodec', default='unknown').replace('none', ''),
                format_field(f, 'abr', '%3dk'),
                format_field(f, 'asr', '%5dHz'),
                ', '.join(filter(None, (
                    'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
                    format_field(f, 'language', '[%s]'),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                ))),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
                       '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
    else:
        # Legacy youtube-dl style listing
        table = [
            [
                format_field(f, 'format_id'),
                format_field(f, 'ext'),
                self.format_resolution(f),
                self._format_note(f)]
            for f in formats
            if f.get('preference') is None or f['preference'] >= -1000]
        header_line = ['format code', 'extension', 'resolution', 'note']

    self.to_screen(
        '[info] Available formats for %s:' % info_dict['id'])
    self.to_stdout(render_table(
        header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
def list_thumbnails(self, info_dict):
    """Print a table of the available thumbnails for a video."""
    # BUG FIX: 'thumbnails' may be absent or None for some extractors;
    # the old `list(info_dict.get('thumbnails'))` raised TypeError then.
    thumbnails = list(info_dict.get('thumbnails') or [])
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return

    self.to_screen(
        '[info] Thumbnails for %s:' % info_dict['id'])
    self.to_stdout(render_table(
        ['ID', 'width', 'height', 'URL'],
        [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print a table of the available subtitle tracks for *video_id*.

    *subtitles* maps language codes to lists of format dicts; *name* is
    the label used in the printed messages.
    """
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen(
        'Available %s for %s:' % (name, video_id))

    def summarize(lang, formats):
        # Variants are stored oldest-first; display them newest-first.
        pairs = [(f['ext'], f.get('name') or 'unknown') for f in reversed(formats)]
        exts, names = zip(*pairs)
        if len(set(names)) == 1:
            # Every variant shares one name: print it once, or not at all
            # when it is just the 'unknown' placeholder.
            names = [] if names[0] == 'unknown' else names[:1]
        return [lang, ', '.join(names), ', '.join(exts)]

    rows = [summarize(lang, formats) for lang, formats in subtitles.items()]
    self.to_stdout(render_table(
        ['Language', 'Name', 'Formats'], rows, hideEmpty=True))
def urlopen(self, req):
    """Start an URL request, applying the configured socket timeout.

    *req* may be a Request object or a plain URL string.
    """
    if isinstance(req, compat_basestring):
        # Plain URL strings are sanitized into Request objects first
        req = sanitized_Request(req)
    return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
    """Write the '[debug] ...' diagnostics block when --verbose is set.

    Reports encodings, program version/source, git HEAD, Python and
    external-program versions, optional libraries and the proxy map.
    """
    if not self.params.get('verbose'):
        return

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    # How this copy of the program is being run
    source = (
        '(exe)' if hasattr(sys, 'frozen')
        else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
        else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
        else '')
    self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled\n')
    if _PLUGIN_CLASSES:
        self._write_string(
            '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
    if self.params.get('compat_opts'):
        self._write_string(
            '[debug] Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
    try:
        # Best-effort git revision lookup; any failure is silently ignored
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = process_communicate_or_kill(sp)
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: %s\n' % out)
    except Exception:
        try:
            # Python 2 leftover; does not exist on Python 3
            sys.exc_clear()
        except Exception:
            pass

    def python_implementation():
        # Append the PyPy version when running under PyPy
        impl_name = platform.python_implementation()
        if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
            return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
        return impl_name

    self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
        platform.python_version(),
        python_implementation(),
        platform.architecture()[0],
        platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
    ) or 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    from .downloader.fragment import can_decrypt_frag
    from .downloader.websocket import has_websockets
    from .postprocessor.embedthumbnail import has_mutagen
    from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
    lib_str = ', '.join(sorted(filter(None, (
        can_decrypt_frag and 'pycryptodome',
        has_websockets and 'websockets',
        has_mutagen and 'mutagen',
        SQLITE_AVAILABLE and 'sqlite',
        KEYRING_AVAILABLE and 'keyring',
    )))) or 'none'
    self._write_string('[debug] Optional libraries: %s\n' % lib_str)

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        return
        # NOTE(review): everything below is unreachable - the early return
        # above deliberately disables the old update check
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
def _setup_opener(self):
    """Build self._opener (the urllib opener) from the current params.

    Configures the socket timeout, cookie jar, proxies and the custom
    HTTP/HTTPS/redirect/data handlers; disables the file:// scheme.
    """
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
    if opts_proxy is not None:
        if opts_proxy == '':
            # Explicit empty proxy disables proxying entirely
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    redirect_handler = YoutubeDLRedirectHandler()
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler, which lets us override file_open below and so
    # disable the file:// protocol entirely.
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

    # Clear the default headers added by build_opener
    # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
def encode(self, s):
    """Encode *s* to bytes using the configured output encoding.

    Byte strings are passed through unchanged.
    """
    if isinstance(s, bytes):
        # Caller already handed us encoded data - nothing to do.
        return s
    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        # Enrich the error message so users know how to work around it.
        err.reason += '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the character encoding used for program output."""
    configured = self.params.get('encoding')
    # Fall back to the platform default when the user did not choose one.
    return configured if configured is not None else preferredencoding()
def _write_thumbnails(self, info_dict, filename):
    """Download the video's thumbnail(s) next to *filename*.

    Writes all thumbnails with --write-all-thumbnails, otherwise at most
    the best one. Returns the list of written extensions (with the
    thumbnail-id suffix when several are written).
    """
    write_all = self.params.get('write_all_thumbnails', False)
    thumbnails = []
    if write_all or self.params.get('writethumbnail', False):
        thumbnails = info_dict.get('thumbnails') or []
    multiple = write_all and len(thumbnails) > 1

    ret = []
    # Iterate best-first (the list is stored worst-first)
    for t in thumbnails[::-1]:
        thumb_ext = determine_ext(t['url'], 'jpg')
        suffix = '%s.' % t['id'] if multiple else ''
        thumb_display_id = '%s ' % t['id'] if multiple else ''
        thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))

        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
            # Reuse the existing file instead of re-downloading
            ret.append(suffix + thumb_ext)
            t['filepath'] = thumb_filename
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
        else:
            self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            try:
                uf = self.urlopen(t['url'])
                with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                ret.append(suffix + thumb_ext)
                self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                t['filepath'] = thumb_filename
            except network_exceptions as err:
                # Thumbnail failures are not fatal to the download
                self.report_warning('Unable to download thumbnail "%s": %s' %
                                    (t['url'], error_to_compat_str(err)))
        if ret and not write_all:
            # Only the best thumbnail is wanted - stop after the first success
            break
    return ret
| true | true |
f7fc83a1e47fd15ed999dae9cb79ffc725ec6bfa | 8,154 | py | Python | hexrd/ui/overlay_manager.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 13 | 2020-02-18T00:23:02.000Z | 2022-02-24T20:04:36.000Z | hexrd/ui/overlay_manager.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 656 | 2020-01-14T02:33:40.000Z | 2022-03-26T15:31:17.000Z | hexrd/ui/overlay_manager.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 6 | 2020-01-17T15:02:53.000Z | 2020-11-01T22:02:48.000Z | from PySide2.QtCore import Qt, QItemSelectionModel, QSignalBlocker
from PySide2.QtWidgets import (
QCheckBox, QComboBox, QHBoxLayout, QSizePolicy, QWidget
)
from hexrd.ui.constants import OverlayType
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.overlay_editor import OverlayEditor
from hexrd.ui.overlay_style_picker import OverlayStylePicker
from hexrd.ui.ui_loader import UiLoader
# Column indices of the overlay manager's table: the material combo box,
# the overlay-type combo box and the visibility checkbox.
COLUMNS = {
    'material': 0,
    'type': 1,
    'visible': 2
}
class OverlayManager:
    """Dialog that manages HEXRD overlays.

    Shows one table row per entry of HexrdConfig().overlays (material,
    type, visibility) and provides add/remove/edit-style controls plus an
    embedded OverlayEditor for the selected overlay.
    """

    def __init__(self, parent=None):
        loader = UiLoader()
        self.ui = loader.load_file('overlay_manager.ui', parent)
        self.overlay_editor = OverlayEditor(self.ui)
        self.ui.overlay_editor_layout.addWidget(self.overlay_editor.ui)
        flags = self.ui.windowFlags()
        self.ui.setWindowFlags(flags | Qt.Tool)
        # Per-row cell widgets, kept in row order so index i maps to
        # HexrdConfig().overlays[i]
        self.material_combos = []
        self.type_combos = []
        self.visibility_boxes = []
        self.setup_connections()

    def setup_connections(self):
        """Wire UI signals and HexrdConfig notifications to their handlers."""
        self.ui.table.selectionModel().selectionChanged.connect(
            self.selection_changed)
        self.ui.add_button.pressed.connect(self.add)
        self.ui.remove_button.pressed.connect(self.remove)
        self.ui.edit_style_button.pressed.connect(self.edit_style)
        HexrdConfig().calibration_complete.connect(self.update_overlay_editor)
        HexrdConfig().material_renamed.connect(self.update_table)
        HexrdConfig().materials_removed.connect(self.update_table)
        HexrdConfig().state_loaded.connect(self.update_table)

    def show(self):
        """Refresh the table contents and show the dialog."""
        self.update_table()
        self.ui.show()

    @staticmethod
    def format_type(type):
        """Return the display label for an OverlayType value."""
        types = {
            OverlayType.powder: 'Powder',
            OverlayType.laue: 'Laue',
            OverlayType.rotation_series: 'Rotation Series',
        }
        if type not in types:
            raise Exception(f'Unknown type: {type}')
        return types[type]

    def create_materials_combo(self, v):
        """Create a centered combo box of all materials, preselecting *v*."""
        materials = list(HexrdConfig().materials.keys())
        if v not in materials:
            raise Exception(f'Unknown material: {v}')
        cb = QComboBox(self.ui.table)
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        cb.setSizePolicy(size_policy)
        for mat in materials:
            cb.addItem(mat, mat)
        cb.setCurrentIndex(materials.index(v))
        cb.currentIndexChanged.connect(self.update_config_materials)
        self.material_combos.append(cb)
        return self.create_table_widget(cb)

    def create_type_combo(self, v):
        """Create a centered combo box of all overlay types, preselecting *v*."""
        types = list(OverlayType)
        if v not in types:
            raise Exception(f'Unknown type: {v}')
        cb = QComboBox(self.ui.table)
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        cb.setSizePolicy(size_policy)
        for type in types:
            cb.addItem(self.format_type(type), type.value)
        cb.setCurrentIndex(types.index(v))
        cb.currentIndexChanged.connect(self.update_config_types)
        self.type_combos.append(cb)
        return self.create_table_widget(cb)

    def create_visibility_checkbox(self, v):
        """Create a centered visibility checkbox with initial state *v*."""
        cb = QCheckBox(self.ui.table)
        cb.setChecked(v)
        cb.toggled.connect(self.update_config_visibilities)
        self.visibility_boxes.append(cb)
        return self.create_table_widget(cb)

    def create_table_widget(self, w):
        """Wrap widget *w* in a container that centers it in its table cell."""
        # These are required to center the widget...
        tw = QWidget(self.ui.table)
        layout = QHBoxLayout(tw)
        layout.addWidget(w)
        layout.setAlignment(Qt.AlignCenter)
        layout.setContentsMargins(0, 0, 0, 0)
        return tw

    def clear_table(self):
        """Remove all rows and forget the per-row widgets."""
        self.material_combos.clear()
        self.type_combos.clear()
        self.visibility_boxes.clear()
        self.ui.table.clearContents()

    def update_table(self):
        """Rebuild the table from HexrdConfig().overlays, keeping the selection."""
        # Block signals while rebuilding so handlers don't fire per-cell
        block_list = [
            self.ui.table,
            self.ui.table.selectionModel()
        ]
        blockers = [QSignalBlocker(x) for x in block_list]  # noqa: F841

        prev_selected = self.selected_row

        overlays = HexrdConfig().overlays
        self.clear_table()
        self.ui.table.setRowCount(len(overlays))
        for i, overlay in enumerate(overlays):
            w = self.create_materials_combo(overlay['material'])
            self.ui.table.setCellWidget(i, COLUMNS['material'], w)

            w = self.create_type_combo(overlay['type'])
            self.ui.table.setCellWidget(i, COLUMNS['type'], w)

            w = self.create_visibility_checkbox(overlay['visible'])
            self.ui.table.setCellWidget(i, COLUMNS['visible'], w)

        if prev_selected is not None:
            # Re-select the previous row, clamped to the new row count
            select_row = (prev_selected if prev_selected < len(overlays)
                          else len(overlays) - 1)
            self.select_row(select_row)

        self.ui.table.resizeColumnsToContents()

        # Just in case the selection actually changed...
        self.selection_changed()

    def select_row(self, i):
        """Select table row *i*, ignoring out-of-range indices."""
        if i is None or i >= self.ui.table.rowCount():
            # Out of range. Don't do anything.
            return

        # Select the row
        selection_model = self.ui.table.selectionModel()
        selection_model.clearSelection()

        model_index = selection_model.model().index(i, 0)
        command = QItemSelectionModel.Select | QItemSelectionModel.Rows
        selection_model.select(model_index, command)

    @property
    def selected_row(self):
        # Index of the currently selected row, or None
        selected = self.ui.table.selectionModel().selectedRows()
        return selected[0].row() if selected else None

    def selection_changed(self):
        """React to a selection change: refresh buttons and the editor."""
        self.update_enable_states()
        self.update_overlay_editor()

    def update_enable_states(self):
        """Enable remove/edit-style buttons only when a row is selected."""
        row_selected = self.selected_row is not None
        self.ui.remove_button.setEnabled(row_selected)
        self.ui.edit_style_button.setEnabled(row_selected)

    def update_overlay_editor(self):
        """Point the embedded editor at the currently selected overlay."""
        self.overlay_editor.overlay = self.active_overlay

    def update_config_materials(self):
        """Push every row's material combo selection back into the config."""
        for i in range(self.ui.table.rowCount()):
            w = self.material_combos[i]
            overlay = HexrdConfig().overlays[i]
            if overlay['material'] != w.currentData():
                overlay['material'] = w.currentData()
                overlay['update_needed'] = True
                # In case the active widget depends on material settings
                self.overlay_editor.update_active_widget_gui()

        HexrdConfig().update_visible_material_energies()
        HexrdConfig().overlay_config_changed.emit()

    def update_config_types(self):
        """Push every row's type combo selection back into the config."""
        for i in range(self.ui.table.rowCount()):
            w = self.type_combos[i]
            # This won't do anything if the type already matches
            HexrdConfig().change_overlay_type(i, OverlayType(w.currentData()))

        HexrdConfig().overlay_config_changed.emit()
        self.update_overlay_editor()

    def update_config_visibilities(self):
        """Push every row's visibility checkbox state back into the config."""
        for i in range(self.ui.table.rowCount()):
            w = self.visibility_boxes[i]
            HexrdConfig().overlays[i]['visible'] = w.isChecked()

        HexrdConfig().update_visible_material_energies()
        HexrdConfig().overlay_config_changed.emit()

    @property
    def active_material_name(self):
        # Name of the material currently active in HexrdConfig
        return HexrdConfig().active_material_name

    @property
    def active_overlay(self):
        # The overlay dict for the selected row, or None
        i = self.selected_row
        return HexrdConfig().overlays[i] if i is not None else None

    def update_refinement_options(self):
        """Forward refinement-option updates to the embedded editor."""
        self.overlay_editor.update_refinement_options()

    def add(self):
        """Append a new powder overlay for the active material and select it."""
        HexrdConfig().append_overlay(self.active_material_name,
                                     OverlayType.powder)
        self.update_table()
        self.select_row(len(HexrdConfig().overlays) - 1)

    def remove(self):
        """Delete the selected overlay and refresh the table."""
        HexrdConfig().overlays.pop(self.selected_row)
        HexrdConfig().overlay_config_changed.emit()
        self.update_table()

    def edit_style(self):
        """Open the modal style picker for the selected overlay."""
        self._style_picker = OverlayStylePicker(self.active_overlay, self.ui)
        self._style_picker.ui.exec_()
| 34.117155 | 79 | 0.654035 | from PySide2.QtCore import Qt, QItemSelectionModel, QSignalBlocker
from PySide2.QtWidgets import (
QCheckBox, QComboBox, QHBoxLayout, QSizePolicy, QWidget
)
from hexrd.ui.constants import OverlayType
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.overlay_editor import OverlayEditor
from hexrd.ui.overlay_style_picker import OverlayStylePicker
from hexrd.ui.ui_loader import UiLoader
COLUMNS = {
'material': 0,
'type': 1,
'visible': 2
}
class OverlayManager:
def __init__(self, parent=None):
loader = UiLoader()
self.ui = loader.load_file('overlay_manager.ui', parent)
self.overlay_editor = OverlayEditor(self.ui)
self.ui.overlay_editor_layout.addWidget(self.overlay_editor.ui)
flags = self.ui.windowFlags()
self.ui.setWindowFlags(flags | Qt.Tool)
self.material_combos = []
self.type_combos = []
self.visibility_boxes = []
self.setup_connections()
def setup_connections(self):
self.ui.table.selectionModel().selectionChanged.connect(
self.selection_changed)
self.ui.add_button.pressed.connect(self.add)
self.ui.remove_button.pressed.connect(self.remove)
self.ui.edit_style_button.pressed.connect(self.edit_style)
HexrdConfig().calibration_complete.connect(self.update_overlay_editor)
HexrdConfig().material_renamed.connect(self.update_table)
HexrdConfig().materials_removed.connect(self.update_table)
HexrdConfig().state_loaded.connect(self.update_table)
def show(self):
self.update_table()
self.ui.show()
@staticmethod
def format_type(type):
types = {
OverlayType.powder: 'Powder',
OverlayType.laue: 'Laue',
OverlayType.rotation_series: 'Rotation Series',
}
if type not in types:
raise Exception(f'Unknown type: {type}')
return types[type]
def create_materials_combo(self, v):
materials = list(HexrdConfig().materials.keys())
if v not in materials:
raise Exception(f'Unknown material: {v}')
cb = QComboBox(self.ui.table)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
for mat in materials:
cb.addItem(mat, mat)
cb.setCurrentIndex(materials.index(v))
cb.currentIndexChanged.connect(self.update_config_materials)
self.material_combos.append(cb)
return self.create_table_widget(cb)
def create_type_combo(self, v):
types = list(OverlayType)
if v not in types:
raise Exception(f'Unknown type: {v}')
cb = QComboBox(self.ui.table)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
for type in types:
cb.addItem(self.format_type(type), type.value)
cb.setCurrentIndex(types.index(v))
cb.currentIndexChanged.connect(self.update_config_types)
self.type_combos.append(cb)
return self.create_table_widget(cb)
def create_visibility_checkbox(self, v):
cb = QCheckBox(self.ui.table)
cb.setChecked(v)
cb.toggled.connect(self.update_config_visibilities)
self.visibility_boxes.append(cb)
return self.create_table_widget(cb)
def create_table_widget(self, w):
tw = QWidget(self.ui.table)
layout = QHBoxLayout(tw)
layout.addWidget(w)
layout.setAlignment(Qt.AlignCenter)
layout.setContentsMargins(0, 0, 0, 0)
return tw
def clear_table(self):
self.material_combos.clear()
self.type_combos.clear()
self.visibility_boxes.clear()
self.ui.table.clearContents()
def update_table(self):
block_list = [
self.ui.table,
self.ui.table.selectionModel()
]
blockers = [QSignalBlocker(x) for x in block_list]
prev_selected = self.selected_row
overlays = HexrdConfig().overlays
self.clear_table()
self.ui.table.setRowCount(len(overlays))
for i, overlay in enumerate(overlays):
w = self.create_materials_combo(overlay['material'])
self.ui.table.setCellWidget(i, COLUMNS['material'], w)
w = self.create_type_combo(overlay['type'])
self.ui.table.setCellWidget(i, COLUMNS['type'], w)
w = self.create_visibility_checkbox(overlay['visible'])
self.ui.table.setCellWidget(i, COLUMNS['visible'], w)
if prev_selected is not None:
select_row = (prev_selected if prev_selected < len(overlays)
else len(overlays) - 1)
self.select_row(select_row)
self.ui.table.resizeColumnsToContents()
self.selection_changed()
def select_row(self, i):
if i is None or i >= self.ui.table.rowCount():
return
# Select the row
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def selection_changed(self):
self.update_enable_states()
self.update_overlay_editor()
def update_enable_states(self):
row_selected = self.selected_row is not None
self.ui.remove_button.setEnabled(row_selected)
self.ui.edit_style_button.setEnabled(row_selected)
def update_overlay_editor(self):
self.overlay_editor.overlay = self.active_overlay
def update_config_materials(self):
for i in range(self.ui.table.rowCount()):
w = self.material_combos[i]
overlay = HexrdConfig().overlays[i]
if overlay['material'] != w.currentData():
overlay['material'] = w.currentData()
overlay['update_needed'] = True
# In case the active widget depends on material settings
self.overlay_editor.update_active_widget_gui()
HexrdConfig().update_visible_material_energies()
HexrdConfig().overlay_config_changed.emit()
def update_config_types(self):
for i in range(self.ui.table.rowCount()):
w = self.type_combos[i]
# This won't do anything if the type already matches
HexrdConfig().change_overlay_type(i, OverlayType(w.currentData()))
HexrdConfig().overlay_config_changed.emit()
self.update_overlay_editor()
def update_config_visibilities(self):
for i in range(self.ui.table.rowCount()):
w = self.visibility_boxes[i]
HexrdConfig().overlays[i]['visible'] = w.isChecked()
HexrdConfig().update_visible_material_energies()
HexrdConfig().overlay_config_changed.emit()
@property
def active_material_name(self):
return HexrdConfig().active_material_name
@property
def active_overlay(self):
    """The overlay dict for the selected row, or None when nothing is selected."""
    row = self.selected_row
    if row is None:
        return None
    return HexrdConfig().overlays[row]
def update_refinement_options(self):
    """Forward a refinement-options refresh request to the overlay editor."""
    self.overlay_editor.update_refinement_options()
def add(self):
    """Append a new powder overlay for the active material and select it."""
    HexrdConfig().append_overlay(self.active_material_name,
                                 OverlayType.powder)
    self.update_table()
    # Select the overlay that was just appended (last in the list)
    self.select_row(len(HexrdConfig().overlays) - 1)
def remove(self):
    """Remove the currently selected overlay and refresh the table.

    Guarded against an empty selection: ``self.selected_row`` is None when
    no row is selected, and ``list.pop(None)`` would raise TypeError.  The
    remove button is normally disabled in that state, so the guard only
    removes a crash path (e.g. a stray shortcut/programmatic call).
    """
    row = self.selected_row
    if row is None:
        return
    HexrdConfig().overlays.pop(row)
    HexrdConfig().overlay_config_changed.emit()
    self.update_table()
def edit_style(self):
    """Open a modal style-picker dialog for the selected overlay.

    The picker is kept on ``self`` so it is not garbage collected while open.
    """
    self._style_picker = OverlayStylePicker(self.active_overlay, self.ui)
    self._style_picker.ui.exec_()
| true | true |
f7fc83b37257da7c212147a2b2ab73f0f54f497f | 1,695 | py | Python | postshiftapi.py | retart1337/PyTempMail | 45bc30c2dbd8c43b30a78025f8668e765adb0251 | [
"Unlicense"
] | 1 | 2019-06-25T20:51:47.000Z | 2019-06-25T20:51:47.000Z | postshiftapi.py | retart1337/PyTempMail | 45bc30c2dbd8c43b30a78025f8668e765adb0251 | [
"Unlicense"
] | null | null | null | postshiftapi.py | retart1337/PyTempMail | 45bc30c2dbd8c43b30a78025f8668e765adb0251 | [
"Unlicense"
] | null | null | null | import requests
import json
class Api(object):
    """Client for the post-shift.ru temporary-mailbox JSON API.

    Every call hits ``https://post-shift.ru/api.php`` with an ``action``
    query parameter; the server replies with UTF-8 encoded JSON.
    """

    def ReturnContent(self, res):
        """Decode a ``requests`` response body as UTF-8 and parse it as JSON."""
        return json.loads(res.content.decode('utf-8'))

    def CreateNew(self, name):
        """Create a new mailbox; ``name`` (optional) requests a specific address."""
        if name is not None:
            url = 'https://post-shift.ru/api.php?action=new&name=' + name + '&type=json'
        else:
            url = 'https://post-shift.ru/api.php?action=new&type=json'
        # Call the helper through the instance instead of the old
        # ``Api.ReturnContent(0, ...)`` hack that passed 0 as ``self``.
        return self.ReturnContent(requests.get(url))

    def GetList(self, key):
        """Return the message list for mailbox ``key``.

        Fixed: parses with ``json.loads`` instead of ``eval`` — ``eval`` on
        untrusted server output is a code-execution hole and also crashes on
        JSON literals such as ``true``/``false``/``null``.
        """
        return self.ReturnContent(requests.get(
            'https://post-shift.ru/api.php?action=getlist&key=' + key + '&type=json'))

    def GetText(self, key, mID):
        """Return the full text of message ``mID`` in mailbox ``key``."""
        return self.ReturnContent(requests.get(
            'https://post-shift.ru/api.php?action=getmail&key=' + key +
            '&id=' + str(mID) + '&type=json'))

    def GetLiveTime(self, key):
        """Return the remaining-lifetime info for mailbox ``key``."""
        return self.ReturnContent(requests.get(
            'https://post-shift.ru/api.php?action=livetime&key=' + key + '&type=json'))

    def UpdateLiveTime(self, key):
        """Extend the mailbox lifetime; returns the raw response object."""
        return requests.get(
            'https://post-shift.ru/api.php?action=update&key=' + key + '&type=json')

    def DeleteMail(self, key):
        """Delete the mailbox; returns the raw response object."""
        return requests.get(
            'https://post-shift.ru/api.php?action=delete&key=' + key + '&type=json')

    def ClearMail(self, key):
        """Empty the mailbox and return the parsed JSON reply.

        Fixed: the original passed the URL *string* (not a response) to
        ``ReturnContent``, which dereferences ``.content`` and therefore
        always raised AttributeError — the HTTP request was never made.
        """
        return self.ReturnContent(requests.get(
            'https://post-shift.ru/api.php?action=clear&key=' + key + '&type=json'))

    def CheckInbox(self, key):
        """Return True when the mailbox responds without an API error."""
        r = json.loads(requests.get(
            'https://post-shift.ru/api.php?action=getlist&key=' + key +
            '&type=json').content.decode('utf-8'))
        try:
            # The API reports problems as {"error": ...}; a list response
            # (which has no .get, raising AttributeError) means all is well.
            if r.get('error') is not None:
                return False
            else:
                return True
        except AttributeError:
            return True
import json
class Api(object):
def ReturnContent(self, res):
return json.loads(res.content.decode('utf-8'))
def CreateNew(self, name):
if name is not None:
return Api.ReturnContent(0, requests.get('https://post-shift.ru/api.php?action=new&name='+name+'&type=json'))
else:
return Api.ReturnContent(0, requests.get('https://post-shift.ru/api.php?action=new&type=json'))
def GetList(self, key):
return eval(requests.get('https://post-shift.ru/api.php?action=getlist&key=' + key + '&type=json').content.decode('utf-8'))
def GetText(self, key, mID):
return json.loads(requests.get('https://post-shift.ru/api.php?action=getmail&key='+ key+'&id='+str(mID) + '&type=json').content.decode('utf-8'))
def GetLiveTime(self, key):
return json.loads(requests.get('https://post-shift.ru/api.php?action=livetime&key='+ key + '&type=json').content.decode('utf-8'))
def UpdateLiveTime(self, key):
return requests.get('https://post-shift.ru/api.php?action=update&key='+ key + '&type=json')
def DeleteMail(self, key):
return requests.get('https://post-shift.ru/api.php?action=delete&key='+ key + '&type=json')
def ClearMail(self, key):
return Api.ReturnContent(0, 'https://post-shift.ru/api.php?action=clear&key='+ key + '&type=json')
def CheckInbox(self, key):
r = json.loads(requests.get('https://post-shift.ru/api.php?action=getlist&key=' + key + '&type=json').content.decode('utf-8'))
try:
if r.get('error') is not None:
return False
else: return True
except AttributeError:
return True | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.