#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains integration with Azure CosmosDB.
AzureCosmosDBHook communicates via the Azure Cosmos library. Make sure that an
Airflow connection of type `azure_cosmos` exists. Authorization can be done by supplying a
login (=Endpoint URI), a password (=secret key), and the extra fields database_name and
collection_name to specify the default database and collection to use (see connection
`azure_cosmos_default` for an example).
"""
import uuid
from typing import Any, Dict, Optional
from azure.cosmos.cosmos_client import CosmosClient
from azure.cosmos.exceptions import CosmosHttpResponseError
from airflow.exceptions import AirflowBadRequest
from airflow.hooks.base import BaseHook
class AzureCosmosDBHook(BaseHook):
"""
Interacts with Azure CosmosDB.
login should be the endpoint URI and password should be the master key.
Optionally, you can use the following extras to default these values:
{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>"}.
:param azure_cosmos_conn_id: Reference to the
:ref:`Azure CosmosDB connection<howto/connection:azure_cosmos>`.
"""
conn_name_attr = 'azure_cosmos_conn_id'
default_conn_name = 'azure_cosmos_default'
conn_type = 'azure_cosmos'
hook_name = 'Azure CosmosDB'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"extra__azure_cosmos__database_name": StringField(
lazy_gettext('Cosmos Database Name (optional)'), widget=BS3TextFieldWidget()
),
"extra__azure_cosmos__collection_name": StringField(
lazy_gettext('Cosmos Collection Name (optional)'), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['schema', 'port', 'host', 'extra'],
"relabeling": {
'login': 'Cosmos Endpoint URI',
'password': 'Cosmos Master Key Token',
},
"placeholders": {
'login': 'endpoint uri',
'password': 'master key',
'extra__azure_cosmos__database_name': 'database name',
'extra__azure_cosmos__collection_name': 'collection name',
},
}
def __init__(self, azure_cosmos_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_cosmos_conn_id
self._conn = None
self.default_database_name = None
self.default_collection_name = None
def get_conn(self) -> CosmosClient:
"""Return a cosmos db client."""
if not self._conn:
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
endpoint_uri = conn.login
master_key = conn.password
self.default_database_name = extras.get('database_name') or extras.get(
'extra__azure_cosmos__database_name'
)
self.default_collection_name = extras.get('collection_name') or extras.get(
'extra__azure_cosmos__collection_name'
)
# Initialize the Python Azure Cosmos DB client
self._conn = CosmosClient(endpoint_uri, {'masterKey': master_key})
return self._conn
def __get_database_name(self, database_name: Optional[str] = None) -> str:
self.get_conn()
db_name = database_name
if db_name is None:
db_name = self.default_database_name
if db_name is None:
raise AirflowBadRequest("Database name must be specified")
return db_name
def __get_collection_name(self, collection_name: Optional[str] = None) -> str:
self.get_conn()
coll_name = collection_name
if coll_name is None:
coll_name = self.default_collection_name
if coll_name is None:
raise AirflowBadRequest("Collection name must be specified")
return coll_name
def does_collection_exist(self, collection_name: str, database_name: str) -> bool:
"""Checks if a collection exists in CosmosDB."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
existing_container = list(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.query_containers("SELECT * FROM r WHERE r.id=@id", [{"name": "@id", "value": collection_name}])
)
if len(existing_container) == 0:
return False
return True
def create_collection(self, collection_name: str, database_name: Optional[str] = None) -> None:
"""Creates a new collection in the CosmosDB database."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
# We need to check to see if this container already exists so we don't try
# to create it twice
existing_container = list(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.query_containers("SELECT * FROM r WHERE r.id=@id", [{"name": "@id", "value": collection_name}])
)
# Only create if we did not find it already existing
if len(existing_container) == 0:
self.get_conn().get_database_client(self.__get_database_name(database_name)).create_container(
collection_name
)
def does_database_exist(self, database_name: str) -> bool:
"""Checks if a database exists in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
existing_database = list(
self.get_conn().query_databases(
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [{"name": "@id", "value": database_name}],
}
)
)
if len(existing_database) == 0:
return False
return True
def create_database(self, database_name: str) -> None:
"""Creates a new database in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# We need to check to see if this database already exists so we don't try
# to create it twice
existing_database = list(
self.get_conn().query_databases(
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [{"name": "@id", "value": database_name}],
}
)
)
# Only create if we did not find it already existing
if len(existing_database) == 0:
self.get_conn().create_database(database_name)
def delete_database(self, database_name: str) -> None:
"""Deletes an existing database in CosmosDB."""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
self.get_conn().delete_database(database_name)
def delete_collection(self, collection_name: str, database_name: Optional[str] = None) -> None:
"""Deletes an existing collection in the CosmosDB database."""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
self.get_conn().get_database_client(self.__get_database_name(database_name)).delete_container(
collection_name
)
def upsert_document(self, document, database_name=None, collection_name=None, document_id=None):
"""
Inserts a new document (or updates an existing one) into an existing
collection in the CosmosDB database.
"""
# Assign unique ID if one isn't provided
if document_id is None:
document_id = str(uuid.uuid4())
if document is None:
raise AirflowBadRequest("You cannot insert a None document")
# Add the document id if it is missing or None
if 'id' in document:
if document['id'] is None:
document['id'] = document_id
else:
document['id'] = document_id
created_document = (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.upsert_item(document)
)
return created_document
def insert_documents(
self, documents, database_name: Optional[str] = None, collection_name: Optional[str] = None
) -> list:
"""Insert a list of new documents into an existing collection in the CosmosDB database."""
if documents is None:
raise AirflowBadRequest("You cannot insert empty documents")
created_documents = []
for single_document in documents:
created_documents.append(
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.create_item(single_document)
)
return created_documents
def delete_document(
self, document_id: str, database_name: Optional[str] = None, collection_name: Optional[str] = None
) -> None:
"""Delete an existing document out of a collection in the CosmosDB database."""
if document_id is None:
raise AirflowBadRequest("Cannot delete a document without an id")
self.get_conn().get_database_client(self.__get_database_name(database_name)).get_container_client(
self.__get_collection_name(collection_name)
).delete_item(document_id)
def get_document(
self, document_id: str, database_name: Optional[str] = None, collection_name: Optional[str] = None
):
"""Get a document from an existing collection in the CosmosDB database."""
if document_id is None:
raise AirflowBadRequest("Cannot get a document without an id")
try:
return (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.read_item(document_id)
)
except CosmosHttpResponseError:
return None
def get_documents(
self,
sql_string: str,
database_name: Optional[str] = None,
collection_name: Optional[str] = None,
partition_key: Optional[str] = None,
) -> Optional[list]:
"""Get a list of documents from an existing collection in the CosmosDB database via SQL query."""
if sql_string is None:
raise AirflowBadRequest("SQL query string cannot be None")
# Query them in SQL
query = {'query': sql_string}
try:
result_iterable = (
self.get_conn()
.get_database_client(self.__get_database_name(database_name))
.get_container_client(self.__get_collection_name(collection_name))
.query_items(query, partition_key)
)
return list(result_iterable)
except CosmosHttpResponseError:
return None
def get_database_link(database_id: str) -> str:
"""Get Azure CosmosDB database link"""
return "dbs/" + database_id
def get_collection_link(database_id: str, collection_id: str) -> str:
"""Get Azure CosmosDB collection link"""
return get_database_link(database_id) + "/colls/" + collection_id
def get_document_link(database_id: str, collection_id: str, document_id: str) -> str:
"""Get Azure CosmosDB document link"""
return get_collection_link(database_id, collection_id) + "/docs/" + document_id
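# --- Usage sketch (not part of the hook) ---
# A minimal, hedged example of driving the hook from a task callable. It
# assumes an ``azure_cosmos_default`` connection exists with the endpoint URI
# as login and the master key as password; the database, collection and
# document below are purely illustrative.
def _example_upsert() -> None:
    hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')
    if not hook.does_database_exist('example_db'):
        hook.create_database('example_db')
    hook.create_collection('example_collection', database_name='example_db')
    hook.upsert_document(
        {'event': 'demo', 'payload': {'answer': 42}},
        database_name='example_db',
        collection_name='example_collection',
        document_id='demo-1',
    )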
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
"""
Test conv layer operations of NervanaGPU and NervanaCPU against a numpy
reference. The numpy implementation differs from what NervanaCPU does
underneath, which makes it a valid check: it requires the input to be padded
externally, while NervanaCPU does not.
"""
import itertools as itt
import numpy as np
from functools import reduce
from operator import mul
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.nervanacpu import NervanaCPU
from timeit import default_timer
def slicable(dim, pad=0):
"""
Collapse the outer dimensions into one and preserve the inner dimension.
This allows for easy CPU convolution in numpy.
Arguments:
    dim (tuple): dimensions list in a tuple
    pad (int): number of padding pixels
"""
dim0 = reduce(mul, dim[:-1], 1) + pad
return (dim0, dim[-1])
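# Hedged example: for dimI = (C, D, H, W, N) = (64, 3, 7, 7, 64) and pad=1,
# slicable(dimI, 1) == (64*3*7*7 + 1, 64) == (9409, 64); the extra padded row
# is zeroed in the test below so out-of-range indices from pixel_indices can
# safely point at it.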
def pixel_indices(conv, mt, pr, qs):
T, R, S = conv.TRS
D, H, W = conv.DHW
C = conv.C
HW = H * W
DHW = D * H * W
imax = C * DHW
idx = []
for c in range(C):
ci = c * DHW
for t in range(T):
z = mt + t
zi = ci + z * HW
zb = z >= 0 and z < D
for r in range(R):
y = pr + r
yi = zi + y * W
yb = zb and y >= 0 and y < H
for s in range(S):
x = qs + s
if yb and x >= 0 and x < W:
xi = yi + x
else:
xi = imax # out of bounds
idx.append(xi)
return idx
def run_backend_conv(lib, layer, I, F, E, dtype):
beI = lib.array(I, dtype=dtype)
beF = lib.array(F, dtype=dtype)
beE = lib.array(E, dtype=dtype)
beO = lib.zeros(layer.dimO, dtype=dtype)
lib.fprop_conv(layer, beI, beF, beO)
beB = lib.zeros(layer.dimI, dtype=dtype)
lib.bprop_conv(layer, beF, beE, beB)
beU = lib.zeros(layer.dimF, dtype=dtype)
lib.update_conv(layer, beI, beE, beU)
return beO, beB, beU
def pytest_generate_tests(metafunc):
"""
Build a list of test arguments.
"""
N_C_K = [
(64, 64, 64),
(128, 64, 64),
(32, 128, 128),
]
D_H_W = [
(3, 7, 7),
(3, 5, 5),
]
T_R_S = [
(3, 3, 3),
]
if 'fargs_tests' in metafunc.fixturenames:
fargs = itt.product(N_C_K, D_H_W, T_R_S)
metafunc.parametrize("fargs_tests", fargs)
def test_conv_layer(fargs_tests):
dtype = np.float32
ng = NervanaGPU(stochastic_round=False, bench=True)
N, C, K = fargs_tests[0]
D, H, W = fargs_tests[1]
T, R, S = fargs_tests[2]
padding_d, padding_h, padding_w = 0, 1, 1
strides_d, strides_h, strides_w = 1, 1, 1
conv_ng = ng.conv_layer(
dtype,
N, C, K,
D, H, W,
T, R, S,
padding_d, padding_h, padding_w,
strides_d, strides_h, strides_w)
nc = NervanaCPU()
conv_nc = nc.conv_layer(
dtype,
N, C, K,
D, H, W,
T, R, S,
padding_d, padding_h, padding_w,
strides_d, strides_h, strides_w)
assert conv_nc.dimI == conv_ng.dimI
assert conv_nc.dimF == conv_ng.dimF
assert conv_nc.dimO == conv_ng.dimO
assert conv_nc.M == conv_ng.M
dimI = conv_ng.dimI
dimF = conv_ng.dimF
dimO = conv_ng.dimO
# cpu input arrays
cpuI = np.random.uniform(-0.8, 0.8, slicable(dimI, 1)).astype(np.float32)
cpuF = np.random.uniform(0.0, 0.3, slicable(dimF)).astype(np.float32)
cpuE = np.random.uniform(-0.2, 0.2, dimO).astype(np.float32)
# zero pad the last row of cpu input for the sake of numpy
cpuI[-1, :] = 0.0
# =======GPU and CPU==========
beI = cpuI[:-1, :].reshape(dimI)
beF = cpuF.reshape(dimF)
beE = cpuE
start_gpu = default_timer()
ngO, ngB, ngU = run_backend_conv(ng, conv_ng, beI, beF, beE, dtype)
end_gpu = default_timer()
start_cpu = default_timer()
ncO, ncB, ncU = run_backend_conv(nc, conv_nc, beI, beF, beE, dtype)
end_cpu = default_timer()
print("gputime: %s, cputime %s" %
(end_gpu - start_gpu, end_cpu - start_cpu))
# ======numpy===========
# cpu output arrays
cpuO = np.zeros(dimO, dtype=dtype)
cpuB = np.zeros(slicable(dimI, 1), dtype=dtype)
cpuU = np.zeros(slicable(dimF), dtype=dtype)
D, H, W = conv_nc.DHW
T, R, S = conv_nc.TRS
M, P, Q = conv_nc.MPQ
pad_d, pad_h, pad_w = conv_nc.padding
str_d, str_h, str_w = conv_nc.strides
for m in range(M):
mt = m * str_d - pad_d
for p in range(P):
pr = p * str_h - pad_h
for q in range(Q):
qs = q * str_w - pad_w
idx = pixel_indices(conv_nc, mt, pr, qs)
cpuO[:, m, p, q, :] = np.dot(cpuF.T, cpuI[idx, :])
cpuB[idx, :] += np.dot(cpuF, cpuE[:, m, p, q, :])
cpuU += np.dot(cpuI[idx, :], cpuE[:, m, p, q, :].T)
for op, ngA, ncA, cpuA, w in (
("fprop", ngO, ncO, cpuO, Q),
("bprop", ngB, ncB.reshape(dimI), cpuB[:-1, :].reshape(dimI), W),
("update", ngU, ncU.reshape(dimF), cpuU.reshape(dimF), S)):
print(op)
assert np.allclose(ngA.get(), cpuA, rtol=0, atol=1e-4)
assert np.allclose(ncA.get(), cpuA, rtol=0, atol=1e-5)
del ng
del nc
if __name__ == '__main__':
test_conv_layer(((64, 64, 64), (3, 7, 7), (3, 3, 3)))  # one (N, C, K), (D, H, W), (T, R, S) combination
"""
Copyright 2015 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
fqdn_regex = "([a-zA-Z\\d]|[a-zA-Z\\d][a-zA-Z\\d\\-]{0,61}[a-zA-Z\\d])" \
"(\\.([a-zA-Z\\d]|[a-zA-Z\\d][a-zA-Z\\d\\-]{0,61}[a-zA-Z\\d]))*"
# Allow alphanumeric, underscore and minus
project_regex = r"(?:([\w\-]+)\_)?"
client_id_regex = r"^{project}{host}$".format(project=project_regex,
host=fqdn_regex)
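# Hedged examples: "myproject_host-01.example.org" matches client_id_regex
# (an optional "<project>_" prefix followed by an FQDN), and a bare hostname
# such as "host-01.example.org" matches as well since the prefix is optional.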
freezer_action_properties = {
"action": {
"id": "action",
"pattern": r"^[\w-]+$",
"type": "string"
},
"mode": {
"id": "mode",
"pattern": r"^[\w-]+$",
"type": "string"
},
"path_to_backup": {
"id": "path_to_backup",
"type": "string"
},
"backup_name": {
"id": "backup_name",
"type": "string"
},
"container": {
"id": "container",
"type": "string"
},
"restore_abs_path": {
"id": "restore_abs_path",
"type": "string"
},
}
schedule_properties = {
"time_created": {
"id": "time_created",
"type": "integer"
},
"time_started": {
"id": "time_started",
"type": "integer"
},
"time_ended": {
"id": "time_ended",
"type": "integer"
},
"event": {
"id": "event",
"type": "string",
"enum": ["", "stop", "start", "abort", "remove"]
},
"status": {
"id": "status",
"type": "string",
"enum": ["", "completed", "stop", "scheduled",
"running", "aborting", "removed"]
},
"result": {
"id": "result",
"type": "string",
"enum": ["", "success", "fail", "aborted"]
},
"schedule_date": {
"id": "schedule_date",
"type": "string",
"pattern": r"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])"
r"-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])"
r":([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-]"
r"(?:2[0-3]|[01][0-9]):[0-5][0-9])?$"
},
"schedule_interval": {
"id": "schedule_interval",
"type": "string",
"pattern": r"^(continuous|(\d+ +(weeks|days|"
r"hours|minutes|seconds)))$"
},
"schedule_start_date": {
"id": "schedule_start_date",
"type": "string",
"pattern": r"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])"
r"-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):"
r"([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-]"
r"(?:2[0-3]|[01][0-9]):[0-5][0-9])?$"
},
"schedule_end_date": {
"id": "schedule_end_date",
"type": "string",
"pattern": r"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])"
r"-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])"
r":([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-]"
r"(?:2[0-3]|[01][0-9]):[0-5][0-9])?$"
},
"schedule_year": {
"id": "schedule_year",
"type": "string",
"pattern": r"^\d{4}$"
},
"schedule_month": {
"id": "schedule_month",
"type": "string"
},
"schedule_day": {
"id": "schedule_day",
"type": "string"
},
"schedule_week": {
"id": "schedule_week",
"type": "string"
},
"schedule_day_of_week": {
"id": "schedule_day_of_week",
"type": "string"
},
"schedule_hour": {
"id": "schedule_hour",
"type": "string"
},
"schedule_minute": {
"id": "schedule_minute",
"type": "string"
},
"schedule_second": {
"id": "schedule_second",
"type": "string"
},
"current_pid": {
"id": "current_pid",
"type": "integer"
}
}
job_schema = {
"id": "/",
"type": "object",
"definitions": {
"freezer_action": {
"properties": freezer_action_properties,
"additionalProperties": True
},
"job_action": {
"properties": {
"freezer_action": {
"$ref": "#/definitions/freezer_action"
},
"max_retries": {
"type": "integer"
},
"max_retries_interval": {
"type": "integer"
},
"mandatory": {
"type": "boolean"
}
},
"additionalProperties": True
},
"job_action_list": {
"items": {
"$ref": "#/definitions/job_action"
}
}
},
"properties": {
"job_actions": {
"$ref": "#/definitions/job_action_list"
},
"job_schedule": {
"id": "job_schedule",
"type": "object",
"properties": schedule_properties,
"additionalProperties": False,
},
"job_id": {
"id": "job_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"client_id": {
"id": "client_id",
"pattern": client_id_regex,
"type": "string"
},
"session_id": {
"id": "session_id",
"pattern": r"^[\w-]*$",
"type": "string"
},
"session_tag": {
"id": "session_tag",
"type": "integer"
},
"session_name": {
"id": "session_name",
"type": "string"
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"description": {
"id": "description",
"type": "string"
},
"_version": {
"id": "_version",
"type": "integer"
},
"action_defaults": {
"$ref": "#/definitions/freezer_action"
}
},
"additionalProperties": False,
"required": [
"job_actions",
"job_schedule",
"job_id",
"client_id",
"user_id"
]
}
job_patch_schema = {
"id": "/",
"type": "object",
"definitions": {
"freezer_action": {
"properties": freezer_action_properties,
"additionalProperties": True
},
"job_action": {
"properties": {
"freezer_action": {
"$ref": "#/definitions/freezer_action"
},
"max_retries": {
"type": "integer"
},
"max_retries_interval": {
"type": "integer"
},
"mandatory": {
"type": "boolean"
}
},
"additionalProperties": True
},
"job_action_list": {
"items": {
"$ref": "#/definitions/job_action"
}
}
},
"properties": {
"job_actions": {
"$ref": "#/definitions/job_action_list"
},
"job_schedule": {
"id": "job_schedule",
"type": "object",
"properties": schedule_properties,
"additionalProperties": False,
},
"job_id": {
"id": "job_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"client_id": {
"id": "client_id",
"pattern": client_id_regex,
"type": "string"
},
"session_id": {
"id": "session_id",
"pattern": r"^[\w-]*$",
"type": "string"
},
"session_tag": {
"id": "session_tag",
"type": "integer"
},
"session_name": {
"id": "session_name",
"type": "string"
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"description": {
"id": "description",
"type": "string"
},
"_version": {
"id": "_version",
"type": "integer"
},
"action_defaults": {
"$ref": "#/definitions/freezer_action"
}
},
"additionalProperties": False
}
additional_action_properties = {
"action_id": {
"id": "action_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
}
}
tmp_prop = freezer_action_properties.items()
tmp_add_prop = additional_action_properties.items()
joined_properties = {}
joined_properties.update(tmp_prop)
joined_properties.update(tmp_add_prop)
action_schema = {
"id": "/",
"type": "object",
"properties": joined_properties,
"additionalProperties": True,
"required": [
"action_id",
"user_id",
"freezer_action"
]
}
action_patch_schema = {
"id": "/",
"type": "object",
"properties": joined_properties,
"additionalProperties": True
}
session_schema = {
"id": "/",
"type": "object",
"properties": {
"schedule": {
"id": "schedule",
"type": "object",
"properties": schedule_properties,
"additionalProperties": False,
},
"session_id": {
"id": "session_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"session_tag": {
"id": "session_tag",
"type": "integer"
},
"time_started": {
"id": "time_started",
"type": "integer"
},
"time_ended": {
"id": "time_ended",
"type": "integer"
},
},
"additionalProperties": True,
"required": [
"session_id",
"session_tag",
"user_id",
"schedule"
]
}
session_patch_schema = {
"id": "/",
"type": "object",
"properties": {
"schedule": {
"id": "schedule",
"type": "object",
"properties": schedule_properties,
"additionalProperties": False,
},
"session_id": {
"id": "session_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"session_tag": {
"id": "session_tag",
"type": "integer"
},
"time_started": {
"id": "time_started",
"type": "integer"
},
"time_ended": {
"id": "time_ended",
"type": "integer"
},
},
"additionalProperties": True
}
client_info = {
"client_id": {
"id": "client_id",
"pattern": client_id_regex,
"type": "string"
},
"hostname": {
"id": "hostname",
"pattern": fqdn_regex,
"type": "string"
},
"description": {
"id": "description",
"type": "string"
},
"uuid": {
"id": "uuid",
"type": "string"
}
}
client_schema = {
"id": "/",
"type": "object",
"properties": {
"client": {
"id": "client",
"type": "object",
"properties": client_info,
"additionalProperties": True,
"required": [
"client_id"
]
},
"user_id": {
"id": "user_id",
"pattern": r"^[\w-]+$",
"type": "string"
},
"project_id": {
"id": "project_id",
"pattern": r"^[\w-]+$",
"type": "string"
}
},
"additionalProperties": True,
"required": [
"client",
"user_id"
]
}
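# --- Usage sketch (hedged) ---
# These dictionaries are plain JSON Schema documents, so any validator can
# apply them; the jsonschema library call and the sample job below are
# assumptions made only for illustration.
def _example_validate_job():
    import jsonschema

    job_doc = {
        "job_actions": [
            {"freezer_action": {"action": "backup",
                                "backup_name": "nightly",
                                "container": "backups"}}
        ],
        "job_schedule": {"schedule_interval": "1 days"},
        "job_id": "job-0001",
        "client_id": "myproject_host-01.example.org",
        "user_id": "user-0001",
    }
    jsonschema.validate(job_doc, job_schema)  # raises ValidationError on bad input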
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ...util.linalg import pdinv, dpotrs, dpotri, symmetrify, jitchol, dtrtrs, tdot
from GPy.core.parameterization.variational import VariationalPosterior
class Posterior(object):
"""
An object to represent a Gaussian posterior over latent function values, p(f|D).
This may be computed exactly for Gaussian likelihoods, or approximated for
non-Gaussian likelihoods.
The purpose of this class is to serve as an interface between the inference
schemes and the model classes. The model class can make predictions for
the function at any new point x_* by integrating over this posterior.
"""
def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None, prior_mean=0):
"""
woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M
K : the prior covariance (required for lazy computation of various quantities)
mean : the posterior mean
cov : the posterior covariance
Not all of the above need to be supplied! You *must* supply:
K (for lazy computation)
or
K_chol (for lazy computation)
You may supply either:
woodbury_chol
woodbury_vector
Or:
mean
cov
Of course, you can supply more than that, but this class will lazily
compute all other quantities on demand.
"""
#obligatory
self._K = K
if ((woodbury_chol is not None) and (woodbury_vector is not None))\
or ((woodbury_inv is not None) and (woodbury_vector is not None))\
or ((woodbury_inv is not None) and (mean is not None))\
or ((mean is not None) and (cov is not None)):
pass # we have sufficient to compute the posterior
else:
raise ValueError("insufficient information to compute the posterior")
self._K_chol = K_chol
self._K = K
# option 1: woodbury chol (and woodbury vector)
self._woodbury_chol = woodbury_chol
self._woodbury_vector = woodbury_vector
# option 2: woodbury inv (and woodbury vector)
self._woodbury_inv = woodbury_inv
# option 3: mean and covariance
self._mean = mean
self._covariance = cov
self._prior_mean = prior_mean
#compute this lazily
self._precision = None
@property
def mean(self):
"""
Posterior mean
$$
K_{xx}v
v := \texttt{Woodbury vector}
$$
"""
if self._mean is None:
self._mean = np.dot(self._K, self.woodbury_vector)
return self._mean
@property
def covariance(self):
"""
Posterior covariance
$$
K_{xx} - K_{xx}W_{xx}^{-1}K_{xx}
W_{xx} := \texttt{Woodbury inv}
$$
"""
if self._covariance is None:
#LiK, _ = dtrtrs(self.woodbury_chol, self._K, lower=1)
self._covariance = (np.atleast_3d(self._K) - np.tensordot(np.dot(np.atleast_3d(self.woodbury_inv).T, self._K), self._K, [1,0]).T).squeeze()
#self._covariance = self._K - self._K.dot(self.woodbury_inv).dot(self._K)
return self._covariance
@property
def precision(self):
"""
Inverse of posterior covariance
"""
if self._precision is None:
cov = np.atleast_3d(self.covariance)
self._precision = np.zeros(cov.shape) # if one covariance per dimension
for p in range(cov.shape[-1]):
self._precision[:,:,p] = pdinv(cov[:,:,p])[0]
return self._precision
@property
def woodbury_chol(self):
"""
return $L_{W}$ where L is the lower triangular Cholesky decomposition of the Woodbury matrix
$$
L_{W}L_{W}^{\top} = W^{-1}
W^{-1} := \texttt{Woodbury inv}
$$
"""
if self._woodbury_chol is None:
#compute woodbury chol from
if self._woodbury_inv is not None:
winv = np.atleast_3d(self._woodbury_inv)
self._woodbury_chol = np.zeros(winv.shape)
for p in range(winv.shape[-1]):
self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2]
#Li = jitchol(self._woodbury_inv)
#self._woodbury_chol, _ = dtrtri(Li)
#W, _, _, _, = pdinv(self._woodbury_inv)
#symmetrify(W)
#self._woodbury_chol = jitchol(W)
#try computing woodbury chol from cov
elif self._covariance is not None:
raise NotImplementedError("TODO: check code here")
B = self._K - self._covariance
tmp, _ = dpotrs(self.K_chol, B)
self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T)
_, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
else:
raise ValueError("insufficient information to compute posterior")
return self._woodbury_chol
@property
def woodbury_inv(self):
"""
The inverse of the Woodbury matrix; in the Gaussian likelihood case it is defined as
$$
(K_{xx} + \Sigma_{xx})^{-1}
\Sigma_{xx} := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_inv is None:
if self._woodbury_chol is not None:
self._woodbury_inv, _ = dpotri(self._woodbury_chol, lower=1)
#self._woodbury_inv, _ = dpotrs(self.woodbury_chol, np.eye(self.woodbury_chol.shape[0]), lower=1)
symmetrify(self._woodbury_inv)
elif self._covariance is not None:
B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance)
self._woodbury_inv = np.empty_like(B)
for i in range(B.shape[-1]):
tmp, _ = dpotrs(self.K_chol, B[:,:,i])
self._woodbury_inv[:,:,i], _ = dpotrs(self.K_chol, tmp.T)
return self._woodbury_inv
@property
def woodbury_vector(self):
"""
The Woodbury vector; in the Gaussian likelihood case it is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector
@property
def K_chol(self):
"""
Cholesky of the prior covariance K
"""
if self._K_chol is None:
self._K_chol = jitchol(self._K)
return self._K_chol
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
woodbury_vector = self.woodbury_vector
woodbury_inv = self.woodbury_inv
if not isinstance(Xnew, VariationalPosterior):
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, woodbury_vector)
if len(mu.shape)==1:
mu = mu.reshape(-1,1)
if full_cov:
Kxx = kern.K(Xnew)
if woodbury_inv.ndim == 2:
var = Kxx - np.dot(Kx.T, np.dot(woodbury_inv, Kx))
elif woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],Kxx.shape[1],woodbury_inv.shape[2]))
from ...util.linalg import mdot
for i in range(var.shape[2]):
var[:, :, i] = (Kxx - mdot(Kx.T, woodbury_inv[:, :, i], Kx))
var = var
else:
Kxx = kern.Kdiag(Xnew)
if woodbury_inv.ndim == 2:
var = (Kxx - np.sum(np.dot(woodbury_inv.T, Kx) * Kx, 0))[:,None]
elif woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],woodbury_inv.shape[2]))
for i in range(var.shape[1]):
var[:, i] = (Kxx - (np.sum(np.dot(woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
var = var
else:
psi0_star = kern.psi0(pred_var, Xnew)
psi1_star = kern.psi1(pred_var, Xnew)
psi2_star = kern.psi2n(pred_var, Xnew)
la = woodbury_vector
mu = np.dot(psi1_star, la) # TODO: dimensions?
N,M,D = psi0_star.shape[0],psi1_star.shape[1], la.shape[1]
if full_cov:
raise NotImplementedError("Full covariance for Sparse GP predicted with uncertain inputs not implemented yet.")
var = np.zeros((Xnew.shape[0], la.shape[1], la.shape[1]))
di = np.diag_indices(la.shape[1])
else:
tmp = psi2_star - psi1_star[:,:,None]*psi1_star[:,None,:]
var = (tmp.reshape(-1,M).dot(la).reshape(N,M,D)*la[None,:,:]).sum(1) + psi0_star[:,None]
if woodbury_inv.ndim==2:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.flat)[:,None]
else:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.reshape(-1,D))
var = np.clip(var,1e-15,np.inf)
return mu, var
class PosteriorExact(Posterior):
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, self.woodbury_vector)
if len(mu.shape)==1:
mu = mu.reshape(-1,1)
if full_cov:
Kxx = kern.K(Xnew)
if self._woodbury_chol.ndim == 2:
tmp = dtrtrs(self._woodbury_chol, Kx)[0]
var = Kxx - tdot(tmp.T)
elif self._woodbury_chol.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],Kxx.shape[1],self._woodbury_chol.shape[2]))
for i in range(var.shape[2]):
tmp = dtrtrs(self._woodbury_chol[:,:,i], Kx)[0]
var[:, :, i] = (Kxx - tdot(tmp.T))
var = var
else:
Kxx = kern.Kdiag(Xnew)
if self._woodbury_chol.ndim == 2:
tmp = dtrtrs(self._woodbury_chol, Kx)[0]
var = (Kxx - np.square(tmp).sum(0))[:,None]
elif self._woodbury_chol.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],self._woodbury_chol.shape[2]))
for i in range(var.shape[1]):
tmp = dtrtrs(self._woodbury_chol[:,:,i], Kx)[0]
var[:, i] = (Kxx - np.square(tmp).sum(0))
var = var
return mu, var
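# --- Numerical sketch (hedged) ---
# For an exact Gaussian likelihood with noise variance sigma2, the quantities
# cached above reduce to the textbook GP regression identities. The toy kernel
# and data here are illustrative assumptions, not part of GPy.
def _example_exact_posterior():
    X = np.linspace(0., 1., 5)[:, None]
    Y = np.sin(2 * np.pi * X)
    sigma2 = 0.1
    K = np.exp(-0.5 * (X - X.T) ** 2 / 0.2 ** 2)           # prior covariance K_{xx}
    woodbury_inv = np.linalg.inv(K + sigma2 * np.eye(5))   # (K_{xx} + \Sigma_{xx})^{-1}
    woodbury_vector = woodbury_inv.dot(Y)                  # (K_{xx} + \Sigma)^{-1} Y
    post_mean = K.dot(woodbury_vector)                     # K_{xx} v, as in .mean
    post_cov = K - K.dot(woodbury_inv).dot(K)              # as in .covariance
    return post_mean, post_cov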
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import OrderedDict, namedtuple
import inspect
import functools
import os
import re
import sys
import types
import warnings
from tfi.base import _GetAttrAccumulator
from tfi.parse.docstring import GoogleDocstring
from tfi.driver.pytorch.tensor_codec import as_tensor
import torch
def _resolve_instance_method_tensors(instance, fn):
def _expand_annotation(instance, annotation, default=None):
if annotation == inspect.Signature.empty:
return default
return annotation(instance) if isinstance(annotation, _GetAttrAccumulator) else annotation
def _expand_annotation_dict(instance, annotation, default=None):
if annotation == inspect.Signature.empty:
return default
return {
k: v(instance) if isinstance(v, _GetAttrAccumulator) else v
for k, v in annotation.items()
}
def _tensor_info_str(tensor):
shape_list = tensor['shape']
ndims = len(shape_list)
dtype = tensor.get('dtype', None)
if dtype is None:
dtype_name = 'any'
elif isinstance(dtype, type):
dtype_name = dtype.__name__
else:
dtype_name = str(dtype)
if ndims is None:
return "%s ?" % dtype_name
if len(shape_list) == 0:
shape = "scalar"
else:
shape = "<%s>" % (
", ".join(["?" if n is None else str(n) for n in shape_list]),
)
return "%s %s" % (dtype_name, shape)
def _enrich_docs(doc_fields, tensor_dict):
existing = {k: v for k, _, v in doc_fields}
return [
(
name,
_tensor_info_str(tensor_dict[name]) if name in tensor_dict else '',
existing.get(name, '')
)
for name in set([*tensor_dict.keys(), *existing.keys()])
]
sig = inspect.signature(fn)
input_annotations = OrderedDict([
(name, _expand_annotation_dict(instance, param.annotation))
for name, param in sig.parameters.items()
])
output_annotations = OrderedDict([
(name, _expand_annotation_dict(instance, value))
for name, value in _expand_annotation(instance, sig.return_annotation, {}).items()
])
if fn.__doc__:
doc = GoogleDocstring(obj=fn).result()
else:
doc = {'sections': [], 'args': {}, 'returns': {}}
doc['args'] = _enrich_docs(doc['args'], input_annotations)
doc['returns'] = _enrich_docs(doc['returns'], output_annotations)
return doc, input_annotations, output_annotations
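# Hedged note on the annotation convention consumed above: each parameter is
# annotated with a dict such as {'shape': [None, 3], 'dtype': float} (values may
# also be _GetAttrAccumulator expressions resolved against the instance), and
# the return annotation maps output names to dicts of the same form.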
def _make_method(signature_def, existing):
input_tensor_names = signature_def['inputs'].keys()
def session_handle_for(value, signature_def_input):
if isinstance(value, float):
pass
else:
value = as_tensor(value, signature_def_input['shape'], signature_def_input.get('dtype', None))
if 'transform' in signature_def_input:
value = signature_def_input['transform'](value)
if signature_def_input.get('dtype', None) in (int,):
return value
return torch.autograd.Variable(value, volatile=True)
def _impl(self, **kwargs):
return existing(**{
input_name: session_handle_for(kwargs[input_name], input_d)
for input_name, input_d in signature_def['inputs'].items()
})
# Need to properly forge method parameters, complete with annotations.
argdef = ",".join(["_", *input_tensor_names])
argcall = ",".join(["_", *["%s=%s" % (k, k) for k in input_tensor_names]])
gensrc = """lambda %s: _impl(%s)""" % (argdef, argcall)
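# e.g. for inputs {'x', 'y'} this forges "lambda _,x,y: _impl(_,x=x,y=y)",
# giving the generated method real named parameters instead of **kwargs.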
impl = eval(gensrc, {'_impl': _impl})
sigdef_inputs = signature_def['inputs']
impl.__annotations__ = {
k: sigdef_inputs[k]
for k, p in inspect.signature(impl).parameters.items()
if k in sigdef_inputs
}
return impl
class Meta(type):
@staticmethod
def __new__(meta, classname, bases, d):
if '__tfi_del__' in d:
for name in d['__tfi_del__']:
del d[name]
del d['__tfi_del__']
if '__init__' in d:
init = d['__init__']
# Wrap __init__ to auto adapt inputs.
@functools.wraps(init)
def wrapped_init(self, *a, **k):
init(self, *a, **k)
# Once init has executed, we can bind proper methods too!
if not hasattr(self, '__tfi_signature_defs__'):
self.__tfi_signature_defs__ = OrderedDict()
self.__tfi_signature_defs_docs__ = OrderedDict()
for method_name, method in inspect.getmembers(self, predicate=inspect.ismethod):
if method_name.startswith('_'):
continue
doc, input_annotations, output_annotations = _resolve_instance_method_tensors(self, method)
self.__tfi_signature_defs_docs__[method_name] = doc
self.__tfi_signature_defs__[method_name] = dict(
inputs=input_annotations,
outputs=output_annotations)
# Remember which fields to pickle BEFORE we add methods.
if not hasattr(self, '__getstate__'):
self.__tfi_saved_fields__ = list(self.__dict__.keys())
self.__getstate__ = lambda: {k: getattr(self, k) for k in self.__tfi_saved_fields__}
self.__tfi_init__()
d['__init__'] = wrapped_init
return super(Meta, meta).__new__(meta, classname, bases, d)
@staticmethod
def __prepare__(name, bases):
def input_name_decorator(name, **kwargs):
def install_annotation(fn):
# TODO(adamb) Should blow up if unknown/invalid kwargs are given.
# TODO(adamb) Should blow up if kwargs are repeated.
# TODO(adamb) Should blow up if there is no such argument
fn.__annotations__[name] = kwargs
return fn
return install_annotation
def output_name_decorator(name, **kwargs):
def install_annotation(fn):
if 'return' not in fn.__annotations__:
fn.__annotations__['return'] = {}
fn.__annotations__['return'][name] = kwargs
return fn
return install_annotation
d = OrderedDict({
'tfi_input': input_name_decorator,
'tfi_output': output_name_decorator,
'self': _GetAttrAccumulator(),
})
# NOTE(adamb) Remember to delete all of these! Every item in here is
# essentially "reserved" and can't be used as a method name in a
# SavedModel. Expand it with caution.
d['__tfi_del__'] = list(d.keys())
return d
class Base(object, metaclass=Meta):
def __tfi_init__(self):
for method_name, sigdef in self.__tfi_signature_defs__.items():
setattr(self,
method_name,
types.MethodType(
_make_method(sigdef, getattr(self, method_name)),
self))
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
self.__tfi_init__()
from tfi.driver.pytorch.load import persistent_load
from tfi.driver.pytorch import kosher as _kosher
def dump(export_path, model):
from tfi.doc import record_documentation
record_documentation(model)
pickle_module = _kosher.PickleModule(lambda m: m.startswith('zoo.'))
pickle_module.persistent_load = persistent_load
with open(export_path, "w+b") as f:
torch.save(model, f, pickle_module=pickle_module)
def load(import_path):
with open(import_path, "rb") as f:
return torch.load(f, pickle_module=_kosher.PickleModule(lambda x: False))
# Compatibility
export = dump
as_class = load
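# --- Usage sketch (hedged) ---
def _example_round_trip(model, path='/tmp/model.tfi'):
    # ``model`` is assumed to be an instance of a Base subclass and ``path`` is
    # illustrative. dump() records documentation, then pickles the model via
    # torch.save; load() restores it and re-binds its methods via __tfi_init__.
    dump(path, model)
    return load(path)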
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import os
import sys
import unittest
import datahog
from datahog import error
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
class RelationshipTests(base.TestCase):
def setUp(self):
super(RelationshipTests, self).setUp()
datahog.set_context(1, datahog.NODE)
datahog.set_context(2, datahog.NODE,
{'base_ctx': 1, 'storage': datahog.storage.INT})
datahog.set_context(3, datahog.RELATIONSHIP, {
'base_ctx': 1, 'rel_ctx': 2})
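# Hedged note: ctx 1 is a plain node context, ctx 2 stores ints on top of
# ctx 1, and ctx 3 is the relationship context joining them; the tests below
# relate base node 123 (ctx 1) to node 456 (ctx 2) under ctx 3.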
def test_create(self):
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.relationship.create(self.p, 3, 123, 456),
True)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, True, 123, 3, True, 0, 123, 1)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and rel_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, False, 456, 3, False, 0, 456, 2)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
def test_create_failure_noobject_forward(self):
add_fetch_result([])
self.assertRaises(error.NoObject,
datahog.relationship.create, self.p, 3, 123, 456)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, True, 123, 3, True, 0, 123, 1)),
ROWCOUNT,
TPC_ROLLBACK])
def test_create_failure_noobject_reverse(self):
add_fetch_result([(1,)])
add_fetch_result([])
self.assertRaises(error.NoObject,
datahog.relationship.create, self.p, 3, 123, 456)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, True, 123, 3, True, 0, 123, 1)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and rel_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, False, 456, 3, False, 0, 456, 2)),
ROWCOUNT,
ROLLBACK,
TPC_ROLLBACK])
def test_create_failure_duplicate(self):
query_fail(psycopg2.IntegrityError)
self.assertEqual(
datahog.relationship.create(self.p, 3, 123, 456),
False)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE_FAILURE("""
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, (
select count(*)
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
), %s
where exists (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
)
returning 1
""", (123, 456, 3, True, 123, 3, True, 0, 123, 1)),
TPC_ROLLBACK])
def test_create_with_positions(self):
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.relationship.create(self.p, 3, 123, 456, 4, 5),
True)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with eligible as (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
), bump as (
update relationship
set pos=pos + 1
where
exists (select 1 from eligible)
and time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and pos >= %s
)
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, %s, %s
where exists (select 1 from eligible)
returning 1
""", (123, 1, True, 123, 3, 4, 123, 456, 3, True, 4, 0)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
with eligible as (
select 1
from node
where
time_removed is null
and id=%s
and ctx=%s
), bump as (
update relationship
set pos=pos + 1
where
exists (select 1 from eligible)
and time_removed is null
and forward=%s
and rel_id=%s
and ctx=%s
and pos >= %s
)
insert into relationship (base_id, rel_id, ctx, forward, pos, flags)
select %s, %s, %s, %s, %s, %s
where exists (select 1 from eligible)
returning 1
""", (456, 2, False, 456, 3, 5, 123, 456, 3, False, 5, 0)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
def test_list_forwards(self):
add_fetch_result([(456, 0, 0), (457, 0, 1), (458, 0, 2), (459, 0, 3)])
self.assertEqual(
datahog.relationship.list(self.p, 123, 3),
([
{'ctx': 3, 'base_id': 123, 'rel_id': 456, 'flags': set([])},
{'ctx': 3, 'base_id': 123, 'rel_id': 457, 'flags': set([])},
{'ctx': 3, 'base_id': 123, 'rel_id': 458, 'flags': set([])},
{'ctx': 3, 'base_id': 123, 'rel_id': 459, 'flags': set([])},
], 4))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select rel_id, flags, pos
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 3, True, 0, 100)),
FETCH_ALL,
COMMIT])
def test_list_reverse(self):
add_fetch_result([(123, 0, 0), (124, 0, 1), (125, 0, 2), (126, 0, 3)])
self.assertEqual(
datahog.relationship.list(self.p, 456, 3, False),
([
{'ctx': 3, 'base_id': 123, 'rel_id': 456, 'flags': set([])},
{'ctx': 3, 'base_id': 124, 'rel_id': 456, 'flags': set([])},
{'ctx': 3, 'base_id': 125, 'rel_id': 456, 'flags': set([])},
{'ctx': 3, 'base_id': 126, 'rel_id': 456, 'flags': set([])},
], 4))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags, pos
from relationship
where
time_removed is null
and rel_id=%s
and ctx=%s
and forward=%s
and pos >= %s
order by pos asc
limit %s
""", (456, 3, False, 0, 100)),
FETCH_ALL,
COMMIT])
def test_get_success(self):
add_fetch_result([(456, 0, 7)])
self.assertEqual(
datahog.relationship.get(self.p, 3, 123, 456),
{'ctx': 3, 'base_id': 123, 'rel_id': 456, 'flags': set([])})
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select rel_id, flags, pos
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos >= %s
and rel_id=%s
order by pos asc
limit %s
""", (123, 3, True, 0, 456, 1)),
FETCH_ALL,
COMMIT])
def test_get_failure(self):
add_fetch_result([])
self.assertEqual(
datahog.relationship.get(self.p, 3, 123, 456),
None)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select rel_id, flags, pos
from relationship
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos >= %s
and rel_id=%s
order by pos asc
limit %s
""", (123, 3, True, 0, 456, 1)),
FETCH_ALL,
COMMIT])
def test_add_flags(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [1, 3], []),
set([1, 3]))
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, True, 456, 3, 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, False, 456, 3, 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_add_flags_no_rel(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [1, 3], []),
None)
def test_clear_flags(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(2,)])
add_fetch_result([(2,)])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [], [1, 3]),
set([2]))
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags & ~%s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, True, 456, 3, 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags & ~%s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, False, 456, 3, 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_clear_flags_no_rel(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [], [1, 3]),
None)
def test_set_flags_add(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [1, 3], []),
set([1, 3]))
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, True, 456, 3, 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (5, False, 456, 3, 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_clear(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(4,)])
add_fetch_result([(4,)])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [], [1, 2]),
set([3]))
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags & ~%s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (3, True, 456, 3, 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=flags & ~%s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (3, False, 456, 3, 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_both(self):
datahog.set_flag(1, 3)
datahog.set_flag(2, 3)
datahog.set_flag(3, 3)
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.relationship.set_flags(self.p, 123, 456, 3, [1, 3], [2]),
set([1, 3]))
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=(flags & ~%s) | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (2, 5, True, 456, 3, 123)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update relationship
set flags=(flags & ~%s) | %s
where time_removed is null and forward=%s and rel_id=%s and ctx=%s and base_id=%s
returning flags
""", (2, 5, False, 456, 3, 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_shift(self):
add_fetch_result([(True,)])
self.assertEqual(
datahog.relationship.shift(self.p, 123, 456, 3, True, 7),
True)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with oldpos as (
select pos
from relationship
where
time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and rel_id=%s
), bump as (
update relationship
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
returning 1
), move as (
update relationship
set pos=%s
where
time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and rel_id=%s
returning 1
)
select exists (select 1 from move)
""", (True, 123, 3, 456, True, 123, 3, 7, 7, True, 123, 3, 456)),
FETCH_ONE,
COMMIT])
def test_shift_failure(self):
add_fetch_result([(False,)])
self.assertEqual(
datahog.relationship.shift(self.p, 123, 456, 3, True, 7),
False)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with oldpos as (
select pos
from relationship
where
time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and rel_id=%s
), bump as (
update relationship
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
returning 1
), move as (
update relationship
set pos=%s
where
time_removed is null
and forward=%s
and base_id=%s
and ctx=%s
and rel_id=%s
returning 1
)
select exists (select 1 from move)
""", (True, 123, 3, 456, True, 123, 3, 7, 7, True, 123, 3, 456)),
FETCH_ONE,
COMMIT])
def test_remove(self):
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.relationship.remove(self.p, 123, 456, 3),
True)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with removal as (
update relationship
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and rel_id=%s
returning pos
), bump as (
update relationship
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, True, 456, 123, 3, True)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
with removal as (
update relationship
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and rel_id=%s
returning pos
), bump as (
update relationship
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and rel_id=%s
and ctx=%s
and forward=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, False, 456, 456, 3, False)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
def test_remove_failure_forward(self):
add_fetch_result([])
self.assertEqual(
datahog.relationship.remove(self.p, 123, 456, 3),
False)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with removal as (
update relationship
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and rel_id=%s
returning pos
), bump as (
update relationship
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, True, 456, 123, 3, True)),
ROWCOUNT,
TPC_ROLLBACK])
def test_remove_failure_reverse(self):
add_fetch_result([(1,)])
add_fetch_result([])
self.assertEqual(
datahog.relationship.remove(self.p, 123, 456, 3),
False)
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with removal as (
update relationship
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and rel_id=%s
returning pos
), bump as (
update relationship
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, True, 456, 123, 3, True)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
with removal as (
update relationship
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and forward=%s
and rel_id=%s
returning pos
), bump as (
update relationship
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and rel_id=%s
and ctx=%s
and forward=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 3, False, 456, 456, 3, False)),
ROWCOUNT,
ROLLBACK,
TPC_ROLLBACK])
if __name__ == '__main__':
unittest.main()
import UserDict
from pygr import annotation, seqdb, sequence, sqlgraph, worldbase
from pygr.classutil import read_only_error
class UCSCStrandDescr(object):
def __get__(self, obj, objtype):
if obj.strand == '+':
return 1
else:
return -1
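# The descriptor above exposes UCSC's textual ``strand`` column ('+'/'-') as
# the numeric ``orientation`` (+1/-1) attribute used on the row classes below.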
class UCSCSeqIntervalRow(sqlgraph.TupleO):
orientation = UCSCStrandDescr()
class UCSCEnsemblInterface(object):
'package of gene, transcript, exon, protein interfaces to UCSC/Ensembl'
def __init__(self, ucsc_genome_name, ens_species=None,
ucsc_serverInfo=None, ens_serverInfo=None,
ens_db=None, trackVersion='hgFixed.trackVersion'):
'''Construct interfaces to UCSC/Ensembl annotation databases.
ucsc_genome_name must be a worldbase ID specifying a UCSC genome,
following the UCSC naming convention.
ens_species should be the Ensembl database name (generally
the name of the species). If not specified, we will try
to autodetect it based on ucsc_genome_name.
The interface uses the standard UCSC and Ensembl mysql servers
by default, unless you provide serverInfo argument(s).
trackVersion must be the fully qualified MySQL table name
of the trackVersion table containing information about the
Ensembl version that each genome dataset connects to.'''
# Connect to both servers and prepare database names.
if ucsc_serverInfo is not None:
if isinstance(ucsc_serverInfo, str): # treat as worldbase ID
self.ucsc_server = worldbase(ucsc_serverInfo)
else:
self.ucsc_server = ucsc_serverInfo
else:
self.ucsc_server = sqlgraph.DBServerInfo(
host='genome-mysql.cse.ucsc.edu', user='genome')
if ens_serverInfo is not None:
if isinstance(ens_serverInfo, str): # treat as worldbase ID
self.ens_server = worldbase(ens_serverInfo)
else:
self.ens_server = ens_serverInfo
else:
self.ens_server = sqlgraph.DBServerInfo(
host='ensembldb.ensembl.org', port=5306, user='anonymous')
self.ucsc_db = ucsc_genome_name.split('.')[-1]
if ens_db is None: # auto-set ensembl database name
self.ens_db = self.get_ensembl_db_name(ens_species,
trackVersion)
else:
self.ens_db = ens_db
# Connect to all the necessary tables.
self.ucsc_ensGene_trans = sqlgraph.SQLTable('%s.ensGene' %
self.ucsc_db, serverInfo=self.ucsc_server,
primaryKey='name', itemClass=UCSCSeqIntervalRow)
self.ucsc_ensGene_gene = sqlgraph.SQLTable('%s.ensGene' %
self.ucsc_db, serverInfo=self.ucsc_server,
primaryKey='name2', allowNonUniqueID=True,
itemClass=UCSCSeqIntervalRow,
attrAlias=dict(minTxStart='min(txStart)',
maxTxEnd='max(txEnd)'))
self.ucsc_ensGtp_gene = sqlgraph.SQLTable('%s.ensGtp' %
self.ucsc_db, serverInfo=self.ucsc_server,
primaryKey='gene', allowNonUniqueID=True)
self.prot_db = sqlgraph.SQLTable('%s.ensGtp' % self.ucsc_db,
serverInfo=self.ucsc_server,
primaryKey='protein',
itemClass=EnsemblProteinRow)
self.prot_db.gRes = self
self.ucsc_ensPep = sqlgraph.SQLTable('%s.ensPep' % self.ucsc_db,
serverInfo=self.ucsc_server,
itemClass=sqlgraph.ProteinSQLSequenceCached,
itemSliceClass=seqdb.SeqDBSlice)
self.ens_exon_stable_id = sqlgraph.SQLTable('%s.exon_stable_id' %
self.ens_db, serverInfo=self.ens_server,
primaryKey='stable_id')
self.ens_transcript_stable_id = sqlgraph.SQLTable(
'%s.transcript_stable_id' % self.ens_db,
serverInfo=self.ens_server, primaryKey='stable_id')
# We will need this too.
self.genome_seq = worldbase(ucsc_genome_name)
# Finally, initialise all UCSC-Ensembl databases.
self.trans_db = annotation.AnnotationDB(self.ucsc_ensGene_trans,
self.genome_seq,
checkFirstID=False,
sliceAttrDict=dict(
id='chrom',
start='txStart',
stop='txEnd'),
itemClass=EnsemblTranscriptAnnotationSeq)
self.gene_db = annotation.AnnotationDB(self.ucsc_ensGene_gene,
self.genome_seq,
checkFirstID=False,
sliceAttrDict=dict(
id='chrom',
start='txStart',
stop='txEnd'))
exon_slicedb = EnsemblExonOnDemandSliceDB(self)
self.exon_db = annotation.AnnotationDB(exon_slicedb,
self.genome_seq,
checkFirstID=False,
sliceAttrDict=dict(id=0,
start=1, stop=2,
orientation=3))
# Mappings.
self.protein_transcript_id_map = sqlgraph.MapView(
self.prot_db, self.trans_db,
'select transcript from %s.ensGtp \
where protein=%%s' % self.ucsc_db, inverseSQL='select protein \
from %s.ensGtp where transcript=%%s' % self.ucsc_db,
serverInfo=self.ucsc_server)
self.transcripts_in_genes_map = sqlgraph.GraphView(
self.gene_db, self.trans_db,
"select transcript from %s.ensGtp where gene=%%s" % self.ucsc_db,
inverseSQL="select gene from %s.ensGtp where transcript=%%s" %
self.ucsc_db, serverInfo=self.ucsc_server)
self.ens_transcripts_of_exons_map = sqlgraph.GraphView(
self.exon_db, self.trans_db, """\
select trans.stable_id from %s.exon_stable_id exon, \
%s.transcript_stable_id trans, %s.exon_transcript et where \
exon.exon_id=et.exon_id and trans.transcript_id=et.transcript_id and \
exon.stable_id=%%s""" % (self.ens_db, self.ens_db, self.ens_db),
serverInfo=self.ens_server)
self.ens_transcripts_of_exons_map2 = sqlgraph.GraphView(
self.ens_exon_stable_id, self.trans_db, """\
select trans.stable_id from %s.exon_stable_id exon, \
%s.transcript_stable_id trans, %s.exon_transcript et where \
exon.exon_id=et.exon_id and trans.transcript_id=et.transcript_id and \
exon.stable_id=%%s""" % (self.ens_db, self.ens_db, self.ens_db),
serverInfo=self.ens_server)
self.ens_exons_in_transcripts_map = sqlgraph.GraphView(
self.trans_db, self.exon_db, """\
select exon.stable_id from %s.exon_stable_id exon, %s.transcript_stable_id \
trans, %s.exon_transcript et where exon.exon_id=et.exon_id and \
trans.transcript_id=et.transcript_id and trans.stable_id=%%s order by \
et.rank""" % (self.ens_db, self.ens_db, self.ens_db),
serverInfo=self.ens_server)
self.ens_exons_in_transcripts_map2 = sqlgraph.GraphView(
self.trans_db, self.ens_exon_stable_id, """\
select exon.stable_id from %s.exon_stable_id exon, %s.transcript_stable_id \
trans, %s.exon_transcript et where exon.exon_id=et.exon_id and \
trans.transcript_id=et.transcript_id and trans.stable_id=%%s order by \
et.rank""" % (self.ens_db, self.ens_db, self.ens_db),
serverInfo=self.ens_server)
self.trans_db.exons_map = self.ens_exons_in_transcripts_map2
def get_ensembl_db_name(self, ens_prefix, trackVersion):
        '''Used by __init__() to obtain the Ensembl database name matching
        the specified UCSC genome version.'''
ucsc_versions = sqlgraph.SQLTableMultiNoCache(trackVersion,
serverInfo=self.ucsc_server)
ucsc_versions._distinct_key = 'db'
cursor = self.ens_server.cursor()
for t in ucsc_versions[self.ucsc_db]: # search rows until success
if ens_prefix is None:
# Note: this assumes 'source' in hgFixed.trackVersion contains
# the URI of the Ensembl data set and that the last path component
# of that URI is the species name of that data set.
try:
ens_prefix1 = t.source.split('/')[-2]
except IndexError:
continue
else:
ens_prefix1 = ens_prefix
cursor.execute("show databases like '%s_core_%s_%%'"
% (ens_prefix1, t.version))
try:
return cursor.fetchall()[0][0]
except IndexError:
pass
raise KeyError(
"Genome %s doesn't exist or has got no Ensembl data at UCSC" %
self.ucsc_db)
def get_gene_transcript_ids(self, gene_id):
'''Obtain a list of stable IDs of transcripts associated
with the specified gene.'''
matching_edges = self.transcripts_in_genes_map[
self.ucsc_ensGtp_gene[gene_id]]
ids = []
for transcript in matching_edges.keys():
ids.append(transcript.name)
return ids
def get_annot_db(self, table, primaryKey='name',
sliceAttrDict=dict(id='chrom', start='chromStart',
stop='chromEnd')):
'''generic method to obtain an AnnotationDB for any
annotation table in UCSC, e.g. snp130. If your target table
has non-standard name, start, end columns, specify them in
the primaryKey and sliceAttrDict args.
Saves table as named attribute on this package object.'''
try: # return existing db if already cached here
return getattr(self, table)
except AttributeError:
pass
sliceDB = sqlgraph.SQLTable(self.ucsc_db + '.' + table,
primaryKey=primaryKey,
serverInfo=self.ucsc_server,
itemClass=UCSCSeqIntervalRow)
annoDB = annotation.AnnotationDB(sliceDB, self.genome_seq,
checkFirstID=False,
sliceAttrDict=sliceAttrDict)
setattr(self, table, annoDB) # cache this db on named attribute
return annoDB
class EnsemblTranscriptAnnotationSeqDescr(object):
def __init__(self, attr):
self.attr = attr
def __get__(self, obj, objtype):
'''Concatenate exon sequences of a transcript to obtain
its sequence.'''
exon_count = obj.exonCount
exon_starts = obj.exonStarts.split(',')[:exon_count]
exon_ends = obj.exonEnds.split(',')[:exon_count]
trans_seq = ''
for i in range(0, exon_count):
trans_seq += str(sequence.absoluteSlice(obj._anno_seq,
int(exon_starts[i]),
int(exon_ends[i])))
seq = sequence.Sequence(trans_seq, obj.name)
setattr(obj, self.attr, seq) # cache on object
return seq
class EnsemblTranscriptAnnotationSeq(annotation.AnnotationSeq):
'''An AnnotationSeq class for transcript annotations, implementing
custom 'mrna_sequence' property.'''
mrna_sequence = EnsemblTranscriptAnnotationSeqDescr('mrna_sequence')
def get_exon_slices(self):
'''Parse the provided transcript, extract exon data from it
and return it as a dictionary of slices.'''
chromosome = self.chrom
exon_count = self.exonCount
exon_starts = self.exonStarts.split(',')[:exon_count]
exon_ends = self.exonEnds.split(',')[:exon_count]
exons = {}
exon_ids = self.get_ensembl_exon_ids()
for i in range(exon_count):
exons[exon_ids[i]] = (chromosome, exon_starts[i], exon_ends[i],
self.orientation)
return exons
def get_ensembl_exon_ids(self):
'''Obtain a list of stable IDs of exons associated with the
specified transcript, ordered by rank.'''
matching_edges = self.db.exons_map[self]
return [exon.stable_id for exon in matching_edges.keys()]
class EnsemblProteinSeqDescr(object):
def __init__(self, attr):
self.attr = attr
def __get__(self, obj, objtype):
transcript = obj.db.gRes.protein_transcript_id_map[obj]
pep = obj.db.gRes.ucsc_ensPep[transcript.name]
seq = sequence.Sequence(str(pep), obj.id)
setattr(obj, self.attr, seq) # cache on object
return seq
class EnsemblProteinRow(sqlgraph.TupleO):
sequence = EnsemblProteinSeqDescr('sequence')
def __repr__(self):
return str(self.id)
class EnsemblExonOnDemandSliceDB(object, UserDict.DictMixin):
'''Obtains exon info on demand by looking up associated transcript '''
def __init__(self, gRes):
self.data = {}
self.gRes = gRes
def __getitem__(self, k):
try:
return self.data[k]
except KeyError:
# Not cached yet, extract the exon from transcript data.
transcripts = self.gRes.ens_transcripts_of_exons_map2[
self.gRes.ens_exon_stable_id[k]].keys()
self.data.update(transcripts[0].get_exon_slices())
# Cache whole transcript interval to speed sequence access
self.gRes.genome_seq.cacheHint({transcripts[0].id:
(transcripts[0].txStart,
transcripts[0].txEnd)},
transcripts[0])
return self.data[k]
__setitem__ = __delitem__ = read_only_error # Throws an exception
def keys(self): # mirror iterator methods from exon stable ID table
return self.gRes.ens_exon_stable_id.keys()
def __iter__(self):
return iter(self.gRes.ens_exon_stable_id)
def __len__(self):
return len(self.gRes.ens_exon_stable_id)
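# --- Usage sketch (editorial addition, not part of the original module) ---
# A minimal, hedged example of how these interfaces are typically wired
# together. The worldbase ID and the Ensembl stable IDs below are
# illustrative assumptions, and running this requires network access to the
# public UCSC and Ensembl MySQL servers.
def _example_ucsc_ensembl_usage():
    iface = UCSCEnsemblInterface('Bio.Seq.Genome.HUMAN.hg18')
    trans = iface.trans_db['ENST00000000233']       # transcript annotation
    mrna = trans.mrna_sequence                      # spliced mRNA (descriptor above)
    exon_ids = trans.get_ensembl_exon_ids()         # exon stable IDs, by rank
    gene_trans = iface.get_gene_transcript_ids('ENSG00000003056')
    return mrna, exon_ids, gene_trans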
|
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as _ConvergenceWarning
from ..exceptions import DataConversionWarning
@deprecated("ConvergenceWarning has been moved into the sklearn.exceptions "
"module. It will not be available here from version 0.19")
class ConvergenceWarning(_ConvergenceWarning):
pass
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
    mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def axis0_safe_slice(X, mask, len_mask):
"""
    This function is safer than safe_mask since it returns an empty
    array when a sparse matrix is sliced with an all-False boolean mask,
    instead of raising an unhelpful error in older versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
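# A small, hedged illustration of the case axis0_safe_slice guards against:
# slicing a sparse matrix with an all-False boolean mask yields an empty
# (0, n_features) array rather than relying on SciPy's handling of empty
# fancy indexing. The helper below is illustrative only.
def _axis0_safe_slice_example():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.arange(6.).reshape(3, 2))
    mask = np.array([False, False, False])
    return axis0_safe_slice(X, mask, int(mask.sum())).shape  # -> (0, 2)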
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
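# Hedged usage sketch for safe_indexing: the same call handles NumPy arrays
# (via the fast ``take`` path for integer indices) and plain Python lists.
# The helper below is illustrative only.
def _safe_indexing_example():
    X = np.array([[1, 2], [3, 4], [5, 6]])
    rows = safe_indexing(X, np.array([2, 0]))        # ndarray.take path
    items = safe_indexing(['a', 'b', 'c'], [1, 2])   # list comprehension path
    return rows, items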
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
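# Hedged sketch of safe_sqr's copy semantics: with ``copy=False`` the
# validated input may be squared in place, which mainly matters for large
# sparse matrices where only ``.data`` is touched. Illustrative helper only.
def _safe_sqr_example():
    from scipy.sparse import csr_matrix
    dense = np.array([[1., -2.], [3., 4.]])
    squared_dense = safe_sqr(dense)                            # new dense array
    squared_sparse = safe_sqr(csr_matrix(dense), copy=False)   # .data squared
    return squared_dense, squared_sparse.toarray()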
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements when
    batch_size does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.videointelligence_v1p1beta1.types import video_intelligence
from google.longrunning import operations_pb2 # type: ignore
from .base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import VideoIntelligenceServiceGrpcTransport
class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTransport):
"""gRPC AsyncIO backend transport for VideoIntelligenceService.
Service that implements Google Cloud Video Intelligence API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "videointelligence.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "videointelligence.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def annotate_video(
self,
) -> Callable[
[video_intelligence.AnnotateVideoRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the annotate video method over gRPC.
Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Returns:
Callable[[~.AnnotateVideoRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "annotate_video" not in self._stubs:
self._stubs["annotate_video"] = self.grpc_channel.unary_unary(
"/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo",
request_serializer=video_intelligence.AnnotateVideoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["annotate_video"]
def close(self):
return self.grpc_channel.close()
__all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",)
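# --- Hedged usage sketch (editorial addition, not generated code) ---
# Shows how this transport is typically constructed and how the
# ``annotate_video`` stub returns a long-running operation. Credentials are
# resolved from the environment, so this assumes Application Default
# Credentials are configured; the GCS URI below is illustrative.
async def _example_annotate_video():
    transport = VideoIntelligenceServiceGrpcAsyncIOTransport(
        host="videointelligence.googleapis.com",
    )
    request = video_intelligence.AnnotateVideoRequest(
        input_uri="gs://example-bucket/example-video.mp4",
        features=[video_intelligence.Feature.LABEL_DETECTION],
    )
    operation = await transport.annotate_video(request)
    return operation  # an operations_pb2.Operation to poll for results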
|
|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the Orchestrate API Service."""
import re
import uuid
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from google.cloud import error_reporting
from orchestrateapi import environ
from orchestrateapi import orchestrate_pb2
error_client = error_reporting.Client()
# Connect to Google Cloud Compute Engine API using the environment's service
# account.
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials,
cache_discovery=False)
class OrchestrateInstanceCreationError(Exception):
"""Provides detailed message on error occurred during instance creation.
"""
pass
def run(request, context):
"""Creates an instance.
Args:
request (orchestrate_pb2.CreateInstanceRequest): Request payload.
context: Context.
Returns:
A orchestrate_pb2.CreateInstanceResponse with the status of the request.
"""
instance = request.instance
print('Orchestrate.CreateInstance project={project} zone={zone}'.format(
project=instance.project,
zone=instance.zone,
))
request_id = uuid.uuid4().hex
try:
payload = build_instance_payload(instance)
operation = compute.instances().insert(
project=instance.project,
zone=instance.zone,
body=payload,
).execute()
print('Started operation {name}'.format(name=operation['name']))
return orchestrate_pb2.CreateInstanceResponse(
status='SUBMITTED',
request_id=str(request_id),
name=payload['name'],
)
except errors.HttpError as exception:
if exception.resp.status == 409:
message = 'An instance with name {name} already exists.'.format(
name=payload['name'])
raise OrchestrateInstanceCreationError(message)
else:
raise
def build_instance_payload(instance):
"""Returns a dict with all creation parameters.
Payload format required by the POST instances.insert endpoint.
https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert
Args:
instance: Creation parameters.
"""
template = get_template(instance)
metadata = Metadata.parse(instance, template)
name = build_name(instance, metadata.orchestrate)
region = '-'.join(instance.zone.split('-')[:2])
region_url = 'projects/{project}/regions/{region}'.format(
project=instance.project,
region=region,
)
zone_url = 'projects/{project}/zones/{zone}'.format(
project=instance.project,
zone=instance.zone,
)
properties = template['properties']
boot_image, boot_image_latest = get_boot_images(properties['disks'])
# POST https://www.googleapis.com/compute/v1/
# projects/{project}/zones/us-central1-a/instances
payload = dict(
name=name,
description=(
'Orchestrate instance created from template {template} size {size}'
).format(template=instance.template, size=instance.size),
)
keys_to_transfer = [
'machineType',
'tags',
'canIpForward',
'networkInterfaces',
'labels',
'scheduling',
'deletionProtection',
'serviceAccounts',
'guestAccelerators',
'disks',
]
for key in keys_to_transfer:
if key in properties:
payload[key] = properties[key]
# Expand to proper URLs for some values that are not supported as URLs
# when stored in the instanceTemplate
# machineType
# b/137211294 - orchestrate instances create would have to pay attention to
# this value and override the machineType from this template.
payload['machineType'] = '{zone_url}/machineTypes/{machine_type}'.format(
zone_url=zone_url,
machine_type=metadata.orchestrate.get('machine_type', 'n1-standard-8'),
)
# acceleratorType
for accelerator in payload.get('guestAccelerators', []):
accelerator['acceleratorType'] = (
'{zone_url}/acceleratorTypes/{gpu_type}'
).format(
zone_url=zone_url,
gpu_type=accelerator['acceleratorType'],
)
# diskType
for disk in payload['disks']:
parameters = disk['initializeParams']
parameters['diskType'] = '{zone_url}/diskTypes/{disk_type}'.format(
zone_url=zone_url,
disk_type=parameters['diskType'],
)
if disk['boot'] and instance.use_latest_image:
# Updates reference to sourceImage to the latest in the image family.
parameters['sourceImage'] = boot_image_latest['selfLink']
# subnetwork
for interface in payload['networkInterfaces']:
network = metadata.orchestrate.get('network', 'default')
subnetwork = metadata.orchestrate.get('subnetwork', network)
interface['network'] = 'global/networks/{network}'.format(network=network)
interface['subnetwork'] = '{region_url}/subnetworks/{subnetwork}'.format(
region_url=region_url,
subnetwork=subnetwork,
)
if not instance.use_external_ip and 'accessConfigs' in interface:
del interface['accessConfigs']
set_startup_script(
metadata,
boot_image_latest if instance.use_latest_image else boot_image
)
payload['metadata'] = dict(items=metadata.instance_payload)
return payload
def get_boot_images(disks):
"""Returns the image referenced in the template and the latest for boot disk.
Args:
disks: A list of properties.disks as returned by instanceTemplates.get
Raises:
OrchestrateInstanceCreationError: if no boot disk can be found.
"""
for disk in disks:
if disk['boot']:
return get_images(disk['initializeParams'])
raise OrchestrateInstanceCreationError('Template has no boot disk.')
def get_images(parameters):
"""Returns the image referenced in the template and the latest version.
Args:
parameters: A dict containing the values of
properties.disks[].initializeParams as returned by instanceTemplates.get
"""
# source image example:
# projects/cloud-media-solutions/global/images/visual-20191217t184104
link = parameters['sourceImage']
# Extract image project and version from that link, e.g.:
# project=cloud-media-solutions
# version=visual-20191217t184104
match = re.match(r'.*/projects/(?P<project>.*)/global/images/(?P<version>.*)',
link)
project = match.group('project')
version = match.group('version')
# get the family from the source image version
image = compute.images().get(project=project, image=version).execute()
# get latest image version from the family
latest = compute.images().getFromFamily(
project=project,
family=image['family'],
).execute()
return image, latest
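# Hedged sketch of the sourceImage parsing above: only the regular expression
# is exercised here, no Compute Engine API calls are made, and the link is an
# illustrative stand-in for a real instanceTemplate sourceImage URI.
def _example_parse_source_image_link():
  link = ('https://www.googleapis.com/compute/v1/projects/example-project'
          '/global/images/example-image-20191217t184104')
  match = re.match(
      r'.*/projects/(?P<project>.*)/global/images/(?P<version>.*)', link)
  # -> ('example-project', 'example-image-20191217t184104')
  return match.group('project'), match.group('version')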
def get_template(instance):
"""Returns the instanceTemplate for the requested Orchestrate template and size.
Args:
instance: Instance creation parameters.
Raises:
OrchestrateInstanceCreationError: if no template can be located based on the
given instance creation parameters.
"""
if not instance.size:
print('Locating default size for template {template}'.format(
template=instance.template))
response = compute.instanceTemplates().list(
project=instance.project,
filter='name = "{}-*"'.format(instance.template),
).execute()
templates = response.get('items', dict())
for template in templates:
for item in template['properties']['metadata']['items']:
if item['key'] == 'orchestrate_default_size' and item['value'] == 'true':
print('Found instanceTemplate {template}'.format(
template=template['name']))
return template
message = (
'Could not locate default size for project {project} template'
' {template}. Please specify an explicit size in the request.'.format(
project=instance.project,
template=instance.template,
)
)
raise OrchestrateInstanceCreationError(message)
else:
print('Finding instanceTemplate {template} size {size}'.format(
template=instance.template, size=instance.size))
name = '{template}-{size}'.format(
template=instance.template, size=instance.size)
template = compute.instanceTemplates().get(
project=instance.project,
instanceTemplate=name,
).execute()
return template
def build_name(instance, orchestrate_metadata):
"""Returns an appropriate name for the instance.
The name is determined in the following order:
1. Use the one explicitly requested upon creation in the instance object.
2. Construct one using the instance-name-pattern value in the requested
template, if any.
3. Generate a unique name based on the requested template and size.
Args:
instance: Instance creation parameters.
orchestrate_metadata (dict): Orchestrate-specific metadata stored in the
instanceTemplate object representing the Orchestrate template. The
instance-name-pattern is stored here, if any.
"""
# Example pattern: {type}-{region}-{gpu_count}x{gpu_type}-{user}
instance_name_pattern = orchestrate_metadata.get('instance_name_pattern')
default_pattern = '{template}-{size}-{id}'
name_pattern = instance.name or instance_name_pattern or default_pattern
unique_id = uuid.uuid4().hex[:5]
gpu_count = orchestrate_metadata.get('gpu_count', 0)
gpu_type = orchestrate_metadata.get('gpu_type', '')
graphics_type = 'vws' if gpu_type.endswith('-vws') else 'gpu'
gpu_name = gpu_type if not gpu_type.endswith('-vws') else gpu_type[:-4]
region = '-'.join(instance.zone.split('-')[:2])
name = name_pattern.format(
template=instance.template,
size=instance.size,
region=region,
zone=instance.zone,
type=graphics_type,
gpu_name=gpu_name,
gpu_count=gpu_count,
gpu_type=gpu_type,
user=unique_id,
id=unique_id,
)
return name
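# Hedged illustration of the pattern expansion done in build_name(): the
# pattern and values below are made up, but show how placeholders such as
# {type}, {region} and {gpu_count} are substituted (unused keys are ignored).
def _example_name_pattern():
  pattern = '{type}-{region}-{gpu_count}x{gpu_name}-{user}'
  return pattern.format(
      template='editorial', size='small', region='us-central1',
      zone='us-central1-a', type='vws', gpu_name='nvidia-tesla-t4',
      gpu_count=1, gpu_type='nvidia-tesla-t4-vws', user='ab12c', id='ab12c',
  )  # -> 'vws-us-central1-1xnvidia-tesla-t4-ab12c'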
class Metadata:
"""Contain instance and orchestrate-specific metadata.
The instanceTemplate representing the Orchestrate template and size stores two
kinds of metadata in the same list: One set intended for the instance itself,
and the other for Orchestrate-specific attributes that extend those stored in
the instanceTemplate itself. The latter are prefixed with "orchestrate_".
  The parse() method splits the metadata into three groups for convenience:
- instance: Metadata intended to propagate down to the instance upon creation.
- instance_payload: Same as "instance" but in a format suitable for the API,
i.e.: [dict(key=..., value=...),...]
  - orchestrate: Metadata intended for Orchestrate itself. This normally
    includes machine type, and other parameters that do not currently exist
    in the standard instanceTemplate entity.
"""
def __init__(self):
self.instance = dict()
self.instance_payload = []
self.orchestrate = dict()
@staticmethod
def parse(instance, template):
"""Split metadata stored in template into instance and orchestrate groups.
Args:
instance: Instance creation parameters.
template: An instanceTemplate object representing the Orchestrate template
and size requested for the instance.
Returns:
An instance of Metadata.
"""
metadata = Metadata()
# Order matters.
# 1. Get metadata from template.
for item in template['properties']['metadata']['items']:
if item['key'].startswith('orchestrate_'):
key = item['key'].replace('orchestrate_', '')
metadata.orchestrate[key] = item['value']
else:
metadata.instance[item['key']] = item['value']
metadata.instance_payload.append(item)
# 2. Override with metadata explicitly provided upon instance creation.
for item in instance.metadata:
metadata.instance[item.key] = item.value
metadata.instance_payload.append(dict(key=item.key, value=item.value))
return metadata
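# Hedged sketch of how Metadata.parse() splits template metadata: keys with
# the "orchestrate_" prefix become Orchestrate-side settings, everything else
# propagates to the instance, and metadata supplied on the request is applied
# last. The template and request stubs below are made up.
def _example_metadata_parse():
  class FakeItem(object):
    def __init__(self, key, value):
      self.key = key
      self.value = value

  class FakeRequest(object):
    metadata = [FakeItem('team', 'editorial')]

  template = {'properties': {'metadata': {'items': [
      {'key': 'orchestrate_machine_type', 'value': 'n1-standard-8'},
      {'key': 'enable-oslogin', 'value': 'TRUE'},
  ]}}}
  metadata = Metadata.parse(FakeRequest(), template)
  # metadata.orchestrate == {'machine_type': 'n1-standard-8'}
  # metadata.instance == {'enable-oslogin': 'TRUE', 'team': 'editorial'}
  return metadata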
def set_startup_script(metadata, image):
"""Set startup script to run post-creation configuration based on metadata.
Determine whether to use a Python startup script for Linux or a PowerShell
one for Windows depending on the image base OS (see get_os_type)
Args:
metadata: Parsed metadata
image: Image.
Raises:
OrchestrateInstanceCreationError: if cannot determine the type of OS from
the given image.
"""
os_type = get_os_type(image)
if os_type == 'windows':
key = 'windows-startup-script-url'
extension = 'ps1'
else:
key = 'startup-script-url'
extension = 'py'
if key not in metadata.instance:
script = 'gs://{bucket}/remotedesktopconfigure.{extension}'.format(
bucket=environ.ORCHESTRATE_BUCKET,
extension=extension,
)
metadata.instance[key] = script
metadata.instance_payload.append(dict(key=key, value=script))
def get_os_type(image):
"""Returns the base OS for given image.
  Determine by looking for an "orchestrate_os" label in the image first in case
this is a custom image. If not, look at the prefix of the family name, e.g.
stock GCP images start with centos-, windows-, etc.
Args:
image: Image.
Raises:
OrchestrateInstanceCreationError: if it cannot determine the type of OS.
"""
# 1. Check for an explicit Orchestrate label.
labels = image.get('labels', dict())
os_type = labels.get('orchestrate_os')
if os_type:
return os_type.lower()
# 2. Guess by family name, if possible.
# Get prefix based on GCP naming conventions, e.g.:
# centos-7, rhel-7, ubuntu-1804-lts, windows-2016
# https://cloud.google.com/compute/docs/images
linux_families = [
'centos',
'debian',
'rhel',
'sles',
'cos',
'coreos',
'ubuntu',
]
family = image['family'].split('-')[0]
if family == 'windows':
return 'windows'
elif family in linux_families:
return 'linux'
# 3. Cannot determine OS
message = (
      'Image {image} does not have an orchestrate_os label, and the OS could'
      ' not be guessed from the family name based on GCP image family naming'
      ' conventions, e.g. windows-, centos-, etc. Please add an orchestrate_os'
' label and set it to either "linux" or "windows" to indicate the base OS'
' for this image. Or, rename the image family to include a prefix with'
' the base OS name.'
).format(image=image['selfLink'])
raise OrchestrateInstanceCreationError(message)
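# Hedged sketch of the detection order in get_os_type(): an explicit
# orchestrate_os label wins, otherwise the image family prefix decides. The
# image dicts below are minimal fakes, not real Compute Engine resources.
def _example_get_os_type():
  labelled = {'labels': {'orchestrate_os': 'Windows'},
              'family': 'custom-image', 'selfLink': 'fake-self-link'}
  stock = {'family': 'ubuntu-1804-lts', 'selfLink': 'fake-self-link'}
  return get_os_type(labelled), get_os_type(stock)  # -> ('windows', 'linux')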
|
|
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
import os, re
import copy
from PyQt4.QtCore import QObject, pyqtSignal, QProcess
def NOT_IMPLEMENTED(n, f):
	msg = '%s: %s: Not implemented' % (n, f)
from PyQt4.QtGui import QMessageBox
QMessageBox.warning(None, "Seascope", msg, QMessageBox.Ok)
import CtagsCache
cmd_table_master = [
[ 'REF', ['&References', 'Ctrl+0'], ['References to' ] ],
[ 'DEF', ['&Definitions', 'Ctrl+1'], ['Definition of' ] ],
[ '<--', ['&Called Functions', 'Ctrl+2'], ['Functions called by' ] ],
[ '-->', ['C&alling Functions', 'Ctrl+3'], ['Functions calling' ] ],
[ 'TXT', ['Find &Text', 'Ctrl+4'], ['Find text' ] ],
[ 'GREP', ['Find &Egrep', 'Ctrl+5'], ['Find egrep pattern' ] ],
[ 'FIL', ['Find &File', 'Ctrl+7'], ['Find files' ] ],
[ 'INC', ['&Include/Import', 'Ctrl+8'], ['Find include/import' ] ],
[ '---', [None, ], None ],
[ 'QDEF', ['&Quick Definition', 'Ctrl+]'], None ],
[ 'CTREE', ['Call Tr&ee', 'Ctrl+\\'], ['Call tree' ] ],
[ '---', [None, ], None ],
[ 'CLGRAPH', ['Class &Graph', 'Ctrl+:'], ['Class graph' ] ],
[ 'CLGRAPHD', ['Class Graph Dir', 'Ctrl+;'], ['Class graph dir' ] ],
[ 'FFGRAPH', ['File Func Graph', 'Ctrl+^'], ['File Func graph dir' ] ],
[ '---', [None, ], None ],
[ 'UPD', ['Re&build Database', None ], None ],
]
class PluginFeatureBase:
def __init__(self):
self.clgraph_query_args = [
['CLGRAPH', 'D', 'Derived classes' ],
['CLGRAPH', 'B', 'Base classes' ],
]
self.clgraph_query_args = [
['CLGRAPH', 'D', 'Derived classes' ],
['CLGRAPH', 'B', 'Base classes' ],
]
self.ffgraph_query_args = [
['FFGRAPH', 'F', 'File functions graph'],
['FFGRAPH_E', 'F+E', 'File functions + external graph'],
['FFGRAPH_D', 'D', 'Directory functions graph'],
['FFGRAPH_DE', 'D+E', 'Directory functions + external graph']
]
def setup(self):
feat_cmds = [ d[0] for d in self.feat_desc ]
ct = copy.deepcopy(cmd_table_master)
self.cmd_table = [ t for t in ct if t[0] in feat_cmds or t[0] == '---' ]
self.menu_cmd_list = [ [c[0]] + c[1] for c in self.cmd_table ]
self.cmd_str2id = {}
self.cmd_str2qstr = {}
self.cmd_qstr2str = {}
for c in self.feat_desc:
self.cmd_str2id[c[0]] = c[1]
for c in self.cmd_table:
if c[2] != None:
self.cmd_str2qstr[c[0]] = c[2][0]
self.cmd_qstr2str[c[2][0]] = c[0]
# python 2.7
#self.cmd_str2id = { c[0]:c[1] for c in self.feat_desc }
#self.cmd_str2qstr = { c[0]:c[2][0] for c in self.cmd_table if c[1] }
#self.cmd_qstr2str = { c[2][0]:c[0] for c in self.cmd_table if c[1] }
self.cmd_qstrlist = [ c[2][0] for c in self.cmd_table if c[2] ]
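# Hedged sketch (editorial addition, not part of the original plugin code) of
# how a plugin narrows cmd_table_master: a subclass fills in feat_desc and
# setup() derives the menu and lookup tables from the master table above.
# The command ids chosen below are made up for illustration.
class _ExampleFeature(PluginFeatureBase):
	def __init__(self):
		PluginFeatureBase.__init__(self)
		self.feat_desc = [
			['GREP', 0],
			['FIL', 1],
		]
		self.setup()
		# self.cmd_qstrlist is now ['Find egrep pattern', 'Find files']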
class ProjectBase(QObject):
prj = None
qry = None
def __init__(self):
QObject.__init__(self)
self.feat = None
def prj_close(self):
if (self.conf != None):
self.conf.proj_close()
self.conf = None
def prj_dir(self):
return self.conf.c_dir
def prj_name(self):
return self.conf.get_proj_name()
def prj_src_files(self):
return self.conf.get_proj_src_files()
def prj_is_open(self):
return self.conf != None
def prj_is_ready(self):
return self.conf.is_ready()
def prj_conf(self):
return self.conf.get_proj_conf()
def prj_update_conf(self, proj_args):
self.conf.proj_update(proj_args)
def prj_show_settings(self, proj_args):
		NOT_IMPLEMENTED(__name__, 'prj_show_settings')
def prj_settings(self, proj_args):
		NOT_IMPLEMENTED(__name__, 'prj_settings')
def prj_feature_setup(self):
self.feat.setup()
def prj_query(self, rquery):
return self.qry.query(rquery)
def prj_rebuild(self):
return self.qry.rebuild()
def prj_query_fl(self):
return self.qry.query_fl()
def prj_type(self):
return self.conf.prj_type
def prj_feature(self):
return self.feat
def prj_settings_get(self):
proj_args = self.prj_conf()
return proj_args
def prj_settings_update(self, proj_args):
		NOT_IMPLEMENTED(__name__, 'prj_settings_update')
return
class ConfigBase(QObject):
def __init__(self, ptype):
self.prj_type = ptype
self.c_dir = ''
self.c_opt = ''
self.c_flist = []
def get_proj_name(self):
return os.path.split(self.c_dir)[1]
def get_proj_src_files(self):
fl = self.c_flist
return fl
def get_proj_conf(self):
return (self.c_dir, self.c_opt, self.c_flist)
def read_config(self):
pass
def write_config(self):
pass
def proj_start(self):
pass
def proj_open(self, proj_path):
self.c_dir = proj_path
self.read_config()
self.proj_start()
def proj_update(self, proj_args):
self.proj_new(proj_args)
def proj_new(self, proj_args):
self.proj_args = proj_args
(self.c_dir, self.c_opt, self.c_flist) = proj_args
self.write_config()
self.proj_start()
def proj_close(self):
pass
def is_ready(self):
return True
@staticmethod
def prepare_menu(menubar):
pass
class QueryBase(QObject):
@staticmethod
def prepare_menu(menubar):
pass
def query(self, rquery):
cmd_str = rquery['cmd']
req = rquery['req']
opt = rquery['opt']
if opt == None:
opt = []
pargs = []
if cmd_str == 'GREP':
pargs = ['grep', '-E', '-R', '-n', '-I']
pargs += [ '--', req ]
pargs += [self.conf.c_dir]
else:
			assert False
return None
qsig = PluginProcess(self.conf.c_dir, [cmd_str, req]).run_query_process(pargs, req, rquery)
return qsig
	def rebuild(self):
		NOT_IMPLEMENTED(__name__, 'rebuild')
def conf_is_open(self):
return self.conf != None
def conf_is_ready(self):
return self.conf.is_ready()
class QueryUiBase(QObject):
def __init__(self):
QObject.__init__(self)
from PyQt4.QtGui import QMessageBox
class QuerySignal(QObject):
sig_result = pyqtSignal(str, list)
sig_result_dbg = pyqtSignal(str, str, str)
sig_rebuild = pyqtSignal()
sig_query_fl = pyqtSignal(list)
def __init__(self):
QObject.__init__(self)
def _relevancy_sort(self, hfile, res):
pt = []
pd = {}
p = hfile
(pre, ext) = os.path.splitext(hfile)
c = None
while p != c:
e = [p, [], []]
pt += [e]
pd[p] = e
c = p
p = os.path.dirname(p)
for line in res:
f = line[1]
d = os.path.dirname(f)
p = f
while p not in pd:
p = os.path.dirname(p)
e = pd[p]
if p in [f, d]:
e[1].append(line)
else:
e[2].append(line)
for e in pt:
e[1] = sorted(e[1], key=lambda li: li[1])
e[2] = sorted(e[2], key=lambda li: li[1])
pre = pre + '.*'
e0 = []
e1 = []
for e in pt[1][1]:
if re.match(pre, e[1]):
e0 += [e]
else:
e1 += [e]
pt[0][1] += e0
pt[1][1] = e1
res1 = []
res2 = []
for e in pt:
res1 += e[1]
res2 += e[2]
res = res1 + res2
return res
def relevancy_sort(self, res):
		if os.getenv('RELEVANCY_SORT', '1') == '0':
return res
hint_file = None
try:
hint_file = self.rquery['hint_file']
except:
pass
if not hint_file:
return res
if not os.path.isabs(hint_file):
print 'BUG: relevancy_sort: not abs path:', hint_file
return res
if len(res) > 10000:
return res
return self._relevancy_sort(hint_file, res)
def emit_result(self, res):
res = self.relevancy_sort(res)
self.sig_result.emit(self.sym, res)
class PluginProcessBase(QObject):
proc_list = []
def __init__(self, wdir):
QObject.__init__(self)
PluginProcess.proc_list.append(self)
self.is_rebuild = False
self.is_query_fl = False
self.sig = QuerySignal()
self.proc = QProcess()
self.proc.finished.connect(self._finished_cb)
self.proc.error.connect(self._error_cb)
self.proc.setWorkingDirectory(wdir)
self.wdir = wdir
def _cleanup(self):
PluginProcess.proc_list.remove(self)
if self.err_str != '':
s = '<b>' + self.p_cmd + '</b><p>' + '<p>'.join(self.err_str.splitlines())
QMessageBox.warning(None, "Seascope", s, QMessageBox.Ok)
if self.res != '':
s = '<b>' + self.p_cmd + '</b><p>Summary<p>' + self.res
QMessageBox.information(None, "Seascope", s, QMessageBox.Ok)
def _error_cb(self, err):
err_dict = {
QProcess.FailedToStart: 'FailedToStart',
QProcess.Crashed: 'Crashed',
QProcess.Timedout: 'The last waitFor...() function timed out',
QProcess.WriteError: 'An error occurred when attempting to write to the process',
QProcess.ReadError: 'An error occurred when attempting to read from the process',
QProcess.UnknownError: 'An unknown error occurred',
}
self.err_str = '<b>' + self.p_cmd + '</b><p>' + err_dict[err]
self._cleanup()
def _finished_cb(self, ret):
res = str(self.proc.readAllStandardOutput())
self.err_str = str(self.proc.readAllStandardError())
#print 'output', res
#print 'cmd:', self.p_cmd
if self.is_rebuild:
self.res = res
self.sig.sig_rebuild.emit()
elif self.is_query_fl:
self.res = ''
res = self.parse_query_fl(res)
self.sig.sig_query_fl.emit(res)
else:
self.res = ''
self.sig.sig_result_dbg.emit(self.p_cmd, res, self.err_str)
try:
res = self.parse_result(res, self.sig)
except Exception as e:
print e
res = [['', '', '', 'error while parsing output of: ' + self.p_cmd]]
if res != None:
self.sig.emit_result(res)
self._cleanup()
def run_query_process(self, pargs, sym, rquery=None):
self.sig.sym = sym
self.sig.rquery = rquery
self.p_cmd = ' '.join(pargs)
if os.getenv('SEASCOPE_DEBUG'):
print self.p_cmd
self.proc.start(pargs[0], pargs[1:])
if self.proc.waitForStarted() == False:
return None
self.proc.closeWriteChannel()
return [self.sig.sig_result, self.sig.sig_result_dbg]
def run_rebuild_process(self, pargs):
self.is_rebuild = True
self.p_cmd = ' '.join(pargs)
self.proc.start(pargs[0], pargs[1:])
if self.proc.waitForStarted() == False:
return None
#print 'cmd:', pargs
self.sig.sig_rebuild.connect(CtagsCache.flush)
return self.sig.sig_rebuild
def run_query_fl(self, pargs):
self.is_query_fl = True
self.p_cmd = ' '.join(pargs)
self.proc.start(pargs[0], pargs[1:])
if self.proc.waitForStarted() == False:
return None
return self.sig.sig_query_fl
def parse_query_fl(self, text):
fl = []
for f in re.split('\r?\n', text.strip()):
if f == '':
continue
fl.append(os.path.join(self.wdir, f))
return fl
class PluginProcess(PluginProcessBase):
def __init__(self, wdir, rq):
PluginProcessBase.__init__(self, wdir)
if rq == None:
rq = ['', '']
self.cmd_str = rq[0]
self.req = rq[1]
def parse_result(self, text, sig):
text = re.split('\r?\n', text)
res = []
if self.cmd_str == 'GREP':
for line in text:
if line == '':
continue
line = ['<unknown>'] + line.split(':', 2)
res.append(line)
else:
			assert False
res.append(['', '', '', 'PluginProcess.parse_result: FAILED'])
return res
qsig = CtagsCache.CtagsThread(sig).apply_fix(self.cmd_str, res, ['<unknown>'])
return qsig
if __name__ == '__main__':
import sys
def slot_result(sym, res):
print 'slot_result: ', [str(sym), res]
sys.exit(0)
def slot_result_dbg(cmd, res, err_str):
print 'slot_result_dbg:', [str(cmd), str(res).strip().split('\n'), str(err_str)]
def slot_rebuild():
print 'slot_rebuild'
from PyQt4.QtCore import QCoreApplication
app = QCoreApplication(sys.argv)
qsig = PluginProcess('.').run_query_process(['ls'], 'ls')
#qsig = PluginProcess('/home/anil/prj/ss/lin').run_query_process(['cscope', '-q', '-k', '-L', '-d', '-0', 'vdso'], 'ls')
if qsig == None:
sys.exit(-1)
qsig[0].connect(slot_result)
qsig[1].connect(slot_result_dbg)
app.exec_()
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent.drivers import nailgun
from fuel_agent import errors
from fuel_agent.objects import image
from fuel_agent.utils import hardware_utils as hu
CEPH_JOURNAL = {
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
}
CEPH_DATA = {
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
PROVISION_SAMPLE_DATA = {
"profile": "pro_fi-le",
"name_servers_search": "\"domain.tld\"",
"uid": "1",
"interfaces": {
"eth2": {
"static": "0",
"mac_address": "08:00:27:b1:d7:15"
},
"eth1": {
"static": "0",
"mac_address": "08:00:27:46:43:60"
},
"eth0": {
"ip_address": "10.20.0.3",
"dns_name": "node-1.domain.tld",
"netmask": "255.255.255.0",
"static": "0",
"mac_address": "08:00:27:79:da:80"
}
},
"interfaces_extra": {
"eth2": {
"onboot": "no",
"peerdns": "no"
},
"eth1": {
"onboot": "no",
"peerdns": "no"
},
"eth0": {
"onboot": "yes",
"peerdns": "no"
}
},
"power_type": "ssh",
"power_user": "root",
"kernel_options": {
"udevrules": "08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,"
"08:00:27:b1:d7:15_eth2",
"netcfg/choose_interface": "08:00:27:79:da:80"
},
"power_address": "10.20.0.253",
"name_servers": "\"10.20.0.2\"",
"ks_meta": {
"image_data": {
"/": {
"uri": "http://fake_image_url",
"format": "ext4",
"container": "gzip"
}
},
"timezone": "America/Los_Angeles",
"master_ip": "10.20.0.2",
"mco_enable": 1,
"mco_vhost": "mcollective",
"mco_pskey": "unset",
"mco_user": "mcollective",
"puppet_enable": 0,
"fuel_version": "5.0.1",
"install_log_2_syslog": 1,
"mco_password": "marionette",
"puppet_auto_setup": 1,
"puppet_master": "fuel.domain.tld",
"mco_auto_setup": 1,
"auth_key": "fake_auth_key",
"authorized_keys": ["fake_authorized_key1", "fake_authorized_key2"],
"repo_metadata": 'repo1="repo1_url",' + "repo2='repo2_url'",
"pm_data": {
"kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90 "
"nomodeset",
"ks_spaces": [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-"
"b385c7cd",
"disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/tmp",
"size": 200,
"type": "partition",
"file_system": "ext2",
"partition_guid": "fake_guid",
"name": "TMP"
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 19438,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 45597,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sda",
"size": 65535
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-"
"708af674",
"disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sdb",
"size": 65535
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-"
"84e74fdf",
"disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
"size": 65535
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 19374,
"volumes": [
{
"mount": "/",
"size": 15360,
"type": "lv",
"name": "root",
"file_system": "ext4"
},
{
"mount": "swap",
"size": 4014,
"type": "lv",
"name": "swap",
"file_system": "swap"
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "min",
"label": "Zero size volume",
"min_size": 0,
"volumes": [
{
"mount": "none",
"size": 0,
"type": "lv",
"name": "zero_size",
"file_system": "xfs"
}
],
"type": "vg",
"id": "zero_size"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 5120,
"volumes": [
{
"mount": "/var/lib/glance",
"size": 175347,
"type": "lv",
"name": "glance",
"file_system": "xfs"
}
],
"type": "vg",
"id": "image"
}
]
},
"mco_connector": "rabbitmq",
"mco_host": "10.20.0.2"
},
"name": "node-1",
"hostname": "node-1.domain.tld",
"slave_name": "node-1",
"power_pass": "/root/.ssh/bootstrap.rsa",
"netboot_enabled": "1"
}
LIST_BLOCK_DEVICES_SAMPLE = [
{'uspec':
{'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/wwn-fake_wwn_2'],
'ID_SERIAL_SHORT': 'fake_serial_2',
'ID_WWN': 'fake_wwn_2',
'DEVPATH': '/devices/pci0000:00/0000:00:3f.2/ata2/host0/'
'target0:0:0/0:0:0:0/block/sdb',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdb',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sdb',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/wwn-fake_wwn_3',
'/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_3',
'ID_WWN': 'fake_wwn_3',
'DEVPATH': '/devices/pci0000:00/0000:00:0d.0/ata4/host0/target0:0:0/'
'0:0:0:0/block/sdc',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdc',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'},
'startsec': '0',
'device': '/dev/sdc',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
]
class TestNailgun(test_base.BaseTestCase):
def setUp(self):
super(TestNailgun, self).setUp()
self.drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
def test_match_device_by_id_matches(self):
# matches by 'by-id' links
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_matches",
"disk/by-id/fake_ata_dont_matches"
]
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_dont_matches_non_empty_extra(self):
# Shouldn't match. If a non-empty `extra` is present, matching is done only
# against the `extra` field, ignoring `id` entirely. E.g. on VirtualBox
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "sdd"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_matches_empty_extra(self):
# since `extra` is empty, it will match by `id`
fake_ks_disk = {
"extra": [],
"id": "sdd"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_matches_missing_extra(self):
# `extra` is empty or missing entirely, so it will match by `id`
fake_ks_disk = {"id": "sdd"}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_dont_matches(self):
# Mismatches totally
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "sda"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_dont_matches_by_id(self):
# Disks are different, but both have the same `by-path` link.
# Matching is done by `extra` (ignoring `id`), so there is no match.
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "disk/by-path/pci-fake_path"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/pci-fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
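# A rough sketch of the matching precedence the tests above assume (this is
# not the actual nailgun.match_device implementation):
#
#     def match_device(hu_disk, ks_disk):
#         devlinks = hu_disk['uspec'].get('DEVLINKS', [])
#         if ks_disk.get('extra'):
#             needles = ks_disk['extra']      # non-empty extra wins, id ignored
#         else:
#             needles = [ks_disk['id']]       # fall back to id
#         return any(link.endswith(n) for link in devlinks for n in needles)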
def test_configdrive_scheme(self):
cd_scheme = self.drv.configdrive_scheme()
self.assertEqual(['fake_authorized_key1', 'fake_authorized_key2',
'fake_auth_key'], cd_scheme.common.ssh_auth_keys)
self.assertEqual('node-1.domain.tld', cd_scheme.common.hostname)
self.assertEqual('node-1.domain.tld', cd_scheme.common.fqdn)
self.assertEqual('"10.20.0.2"', cd_scheme.common.name_servers)
self.assertEqual('"domain.tld"', cd_scheme.common.search_domain)
self.assertEqual('10.20.0.2', cd_scheme.common.master_ip)
self.assertEqual('http://10.20.0.2:8000/api',
cd_scheme.common.master_url)
self.assertEqual('08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,'
'08:00:27:b1:d7:15_eth2', cd_scheme.common.udevrules)
self.assertEqual('08:00:27:79:da:80', cd_scheme.common.admin_mac)
self.assertEqual('10.20.0.3', cd_scheme.common.admin_ip)
self.assertEqual('255.255.255.0', cd_scheme.common.admin_mask)
self.assertEqual('eth0', cd_scheme.common.admin_iface_name)
self.assertEqual('America/Los_Angeles', cd_scheme.common.timezone)
self.assertEqual('fuel.domain.tld', cd_scheme.puppet.master)
self.assertEqual('unset', cd_scheme.mcollective.pskey)
self.assertEqual('mcollective', cd_scheme.mcollective.vhost)
self.assertEqual('10.20.0.2', cd_scheme.mcollective.host)
self.assertEqual('mcollective', cd_scheme.mcollective.user)
self.assertEqual('marionette', cd_scheme.mcollective.password)
self.assertEqual('rabbitmq', cd_scheme.mcollective.connector)
self.assertEqual('pro_fi-le', cd_scheme.profile)
self.assertEqual({'repo1': 'repo1_url', 'repo2': 'repo2_url'},
cd_scheme.common.ks_repos)
@mock.patch.object(hu, 'list_block_devices')
def test_partition_scheme(self, mock_lbd):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
p_scheme = self.drv.partition_scheme()
self.assertEqual(5, len(p_scheme.fss))
self.assertEqual(4, len(p_scheme.pvs))
self.assertEqual(3, len(p_scheme.lvs))
self.assertEqual(2, len(p_scheme.vgs))
self.assertEqual(1, len(p_scheme.mds))
self.assertEqual(3, len(p_scheme.parteds))
@mock.patch.object(hu, 'list_block_devices')
def test_image_scheme(self, mock_lbd):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
p_scheme = self.drv.partition_scheme()
i_scheme = self.drv.image_scheme(p_scheme)
expected_images = []
for fs in p_scheme.fss:
if fs.mount not in PROVISION_SAMPLE_DATA['ks_meta']['image_data']:
continue
i_data = PROVISION_SAMPLE_DATA['ks_meta']['image_data'][fs.mount]
expected_images.append(image.Image(
uri=i_data['uri'],
target_device=fs.device,
format=i_data['format'],
container=i_data['container'],
))
expected_images = sorted(expected_images, key=lambda x: x.uri)
for i, img in enumerate(sorted(i_scheme.images, key=lambda x: x.uri)):
self.assertEqual(img.uri, expected_images[i].uri)
self.assertEqual(img.target_device,
expected_images[i].target_device)
self.assertEqual(img.format,
expected_images[i].format)
self.assertEqual(img.container,
expected_images[i].container)
def test_getlabel(self):
self.assertEqual('', self.drv._getlabel(None))
long_label = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.assertEqual(' -L %s ' % long_label[:12],
self.drv._getlabel(long_label))
@mock.patch.object(hu, 'list_block_devices')
def test_disk_dev_not_found(self, mock_lbd):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
fake_ks_disk = {
"name": "fake",
"extra": [
"disk/by-id/fake_scsi_matches",
"disk/by-id/fake_ata_dont_matches"
]
}
self.assertRaises(errors.DiskNotFoundError, self.drv._disk_dev,
fake_ks_disk)
def test_get_partition_count(self):
self.assertEqual(3, self.drv._get_partition_count('Boot'))
self.assertEqual(1, self.drv._get_partition_count('TMP'))
@mock.patch.object(hu, 'list_block_devices')
def test_partition_scheme_ceph(self, mock_lbd):
# TODO(agordeev): perform better testing of ceph logic
p_data = PROVISION_SAMPLE_DATA.copy()
for i in range(0, 3):
p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
CEPH_JOURNAL)
p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
CEPH_DATA)
self.drv = nailgun.Nailgun(p_data)
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
p_scheme = self.drv.partition_scheme()
self.assertEqual(5, len(p_scheme.fss))
self.assertEqual(4, len(p_scheme.pvs))
self.assertEqual(3, len(p_scheme.lvs))
self.assertEqual(2, len(p_scheme.vgs))
self.assertEqual(1, len(p_scheme.mds))
self.assertEqual(3, len(p_scheme.parteds))
self.assertEqual(3, self.drv._get_partition_count('ceph'))
# NOTE(agordeev): (-2, -1, -1) are the indices of the ceph data partitions
# on the (sda, sdb, sdc) disks respectively.
for disk, part in enumerate((-2, -1, -1)):
self.assertEqual(CEPH_DATA['partition_guid'],
p_scheme.parteds[disk].partitions[part].guid)
|
|
"""Support for Ambient Weather Station Service."""
import asyncio
import logging
from aioambient import Client
from aioambient.errors import WebsocketError
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASS_CONNECTIVITY
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
AREA_SQUARE_METERS,
ATTR_LOCATION,
ATTR_NAME,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONF_API_KEY,
DEGREE,
EVENT_HOMEASSISTANT_STOP,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_INHG,
SPEED_MILES_PER_HOUR,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from .const import (
ATTR_LAST_DATA,
ATTR_MONITORED_CONDITIONS,
CONF_APP_KEY,
DATA_CLIENT,
DOMAIN,
TYPE_BINARY_SENSOR,
TYPE_SENSOR,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG = "config"
DEFAULT_SOCKET_MIN_RETRY = 15
TYPE_24HOURRAININ = "24hourrainin"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_BATT1 = "batt1"
TYPE_BATT10 = "batt10"
TYPE_BATT2 = "batt2"
TYPE_BATT3 = "batt3"
TYPE_BATT4 = "batt4"
TYPE_BATT5 = "batt5"
TYPE_BATT6 = "batt6"
TYPE_BATT7 = "batt7"
TYPE_BATT8 = "batt8"
TYPE_BATT9 = "batt9"
TYPE_BATTOUT = "battout"
TYPE_CO2 = "co2"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_DEWPOINT = "dewPoint"
TYPE_EVENTRAININ = "eventrainin"
TYPE_FEELSLIKE = "feelsLike"
TYPE_HOURLYRAININ = "hourlyrainin"
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY10 = "humidity10"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITY9 = "humidity9"
TYPE_HUMIDITYIN = "humidityin"
TYPE_LASTRAIN = "lastRain"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MONTHLYRAININ = "monthlyrainin"
TYPE_RELAY1 = "relay1"
TYPE_RELAY10 = "relay10"
TYPE_RELAY2 = "relay2"
TYPE_RELAY3 = "relay3"
TYPE_RELAY4 = "relay4"
TYPE_RELAY5 = "relay5"
TYPE_RELAY6 = "relay6"
TYPE_RELAY7 = "relay7"
TYPE_RELAY8 = "relay8"
TYPE_RELAY9 = "relay9"
TYPE_SOILHUM1 = "soilhum1"
TYPE_SOILHUM10 = "soilhum10"
TYPE_SOILHUM2 = "soilhum2"
TYPE_SOILHUM3 = "soilhum3"
TYPE_SOILHUM4 = "soilhum4"
TYPE_SOILHUM5 = "soilhum5"
TYPE_SOILHUM6 = "soilhum6"
TYPE_SOILHUM7 = "soilhum7"
TYPE_SOILHUM8 = "soilhum8"
TYPE_SOILHUM9 = "soilhum9"
TYPE_SOILTEMP1F = "soiltemp1f"
TYPE_SOILTEMP10F = "soiltemp10f"
TYPE_SOILTEMP2F = "soiltemp2f"
TYPE_SOILTEMP3F = "soiltemp3f"
TYPE_SOILTEMP4F = "soiltemp4f"
TYPE_SOILTEMP5F = "soiltemp5f"
TYPE_SOILTEMP6F = "soiltemp6f"
TYPE_SOILTEMP7F = "soiltemp7f"
TYPE_SOILTEMP8F = "soiltemp8f"
TYPE_SOILTEMP9F = "soiltemp9f"
TYPE_SOLARRADIATION = "solarradiation"
TYPE_SOLARRADIATION_LX = "solarradiation_lx"
TYPE_TEMP10F = "temp10f"
TYPE_TEMP1F = "temp1f"
TYPE_TEMP2F = "temp2f"
TYPE_TEMP3F = "temp3f"
TYPE_TEMP4F = "temp4f"
TYPE_TEMP5F = "temp5f"
TYPE_TEMP6F = "temp6f"
TYPE_TEMP7F = "temp7f"
TYPE_TEMP8F = "temp8f"
TYPE_TEMP9F = "temp9f"
TYPE_TEMPF = "tempf"
TYPE_TEMPINF = "tempinf"
TYPE_TOTALRAININ = "totalrainin"
TYPE_UV = "uv"
TYPE_PM25 = "pm25"
TYPE_PM25_24H = "pm25_24h"
TYPE_WEEKLYRAININ = "weeklyrainin"
TYPE_WINDDIR = "winddir"
TYPE_WINDDIR_AVG10M = "winddir_avg10m"
TYPE_WINDDIR_AVG2M = "winddir_avg2m"
TYPE_WINDGUSTDIR = "windgustdir"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_WINDSPDMPH_AVG10M = "windspdmph_avg10m"
TYPE_WINDSPDMPH_AVG2M = "windspdmph_avg2m"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_YEARLYRAININ = "yearlyrainin"
SENSOR_TYPES = {
TYPE_24HOURRAININ: ("24 Hr Rain", "in", TYPE_SENSOR, None),
TYPE_BAROMABSIN: ("Abs Pressure", PRESSURE_INHG, TYPE_SENSOR, "pressure"),
TYPE_BAROMRELIN: ("Rel Pressure", PRESSURE_INHG, TYPE_SENSOR, "pressure"),
TYPE_BATT10: ("Battery 10", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT1: ("Battery 1", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT2: ("Battery 2", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT3: ("Battery 3", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT4: ("Battery 4", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT5: ("Battery 5", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT6: ("Battery 6", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT7: ("Battery 7", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT8: ("Battery 8", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT9: ("Battery 9", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATTOUT: ("Battery", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_CO2: ("co2", CONCENTRATION_PARTS_PER_MILLION, TYPE_SENSOR, None),
TYPE_DAILYRAININ: ("Daily Rain", "in", TYPE_SENSOR, None),
TYPE_DEWPOINT: ("Dew Point", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_EVENTRAININ: ("Event Rain", "in", TYPE_SENSOR, None),
TYPE_FEELSLIKE: ("Feels Like", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_HOURLYRAININ: ("Hourly Rain Rate", "in/hr", TYPE_SENSOR, None),
TYPE_HUMIDITY10: ("Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY1: ("Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY2: ("Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY3: ("Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY4: ("Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY5: ("Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY6: ("Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY7: ("Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY8: ("Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY9: ("Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY: ("Humidity", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITYIN: ("Humidity In", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_LASTRAIN: ("Last Rain", None, TYPE_SENSOR, "timestamp"),
TYPE_MAXDAILYGUST: ("Max Gust", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_MONTHLYRAININ: ("Monthly Rain", "in", TYPE_SENSOR, None),
TYPE_RELAY10: ("Relay 10", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY1: ("Relay 1", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY2: ("Relay 2", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY3: ("Relay 3", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY4: ("Relay 4", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY5: ("Relay 5", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY6: ("Relay 6", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY7: ("Relay 7", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY8: ("Relay 8", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY9: ("Relay 9", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_SOILHUM10: ("Soil Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM1: ("Soil Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM2: ("Soil Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM3: ("Soil Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM4: ("Soil Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM5: ("Soil Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM6: ("Soil Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM7: ("Soil Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM8: ("Soil Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM9: ("Soil Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILTEMP10F: ("Soil Temp 10", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP1F: ("Soil Temp 1", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP2F: ("Soil Temp 2", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP3F: ("Soil Temp 3", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP4F: ("Soil Temp 4", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP5F: ("Soil Temp 5", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP6F: ("Soil Temp 6", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP7F: ("Soil Temp 7", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP8F: ("Soil Temp 8", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP9F: ("Soil Temp 9", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOLARRADIATION: (
"Solar Rad",
f"{POWER_WATT}/{AREA_SQUARE_METERS}",
TYPE_SENSOR,
None,
),
TYPE_SOLARRADIATION_LX: ("Solar Rad (lx)", LIGHT_LUX, TYPE_SENSOR, "illuminance"),
TYPE_TEMP10F: ("Temp 10", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP1F: ("Temp 1", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP2F: ("Temp 2", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP3F: ("Temp 3", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP4F: ("Temp 4", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP5F: ("Temp 5", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP6F: ("Temp 6", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP7F: ("Temp 7", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP8F: ("Temp 8", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP9F: ("Temp 9", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMPF: ("Temp", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMPINF: ("Inside Temp", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TOTALRAININ: ("Lifetime Rain", "in", TYPE_SENSOR, None),
TYPE_UV: ("uv", "Index", TYPE_SENSOR, None),
TYPE_PM25: ("PM25", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, TYPE_SENSOR, None),
TYPE_PM25_24H: (
"PM25 24h Avg",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
TYPE_SENSOR,
None,
),
TYPE_WEEKLYRAININ: ("Weekly Rain", "in", TYPE_SENSOR, None),
TYPE_WINDDIR: ("Wind Dir", DEGREE, TYPE_SENSOR, None),
TYPE_WINDDIR_AVG10M: ("Wind Dir Avg 10m", DEGREE, TYPE_SENSOR, None),
TYPE_WINDDIR_AVG2M: ("Wind Dir Avg 2m", DEGREE, TYPE_SENSOR, None),
TYPE_WINDGUSTDIR: ("Gust Dir", DEGREE, TYPE_SENSOR, None),
TYPE_WINDGUSTMPH: ("Wind Gust", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG10M: ("Wind Avg 10m", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG2M: ("Wind Avg 2m", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPEEDMPH: ("Wind Speed", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_YEARLYRAININ: ("Yearly Rain", "in", TYPE_SENSOR, None),
}
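# Each SENSOR_TYPES value appears to be a 4-tuple of
# (friendly name, unit of measurement, entity kind, device class); e.g.
# SENSOR_TYPES[TYPE_TEMPF] == ("Temp", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature").
# The sensor and binary_sensor platforms (not shown here) unpack these tuples
# when creating entities for each monitored condition.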
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_API_KEY): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Ambient PWS component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
# Store config for use during entry setup:
hass.data[DOMAIN][DATA_CONFIG] = conf
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_API_KEY: conf[CONF_API_KEY], CONF_APP_KEY: conf[CONF_APP_KEY]},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Ambient PWS as config entry."""
if not config_entry.unique_id:
hass.config_entries.async_update_entry(
config_entry, unique_id=config_entry.data[CONF_APP_KEY]
)
session = aiohttp_client.async_get_clientsession(hass)
try:
ambient = AmbientStation(
hass,
config_entry,
Client(
config_entry.data[CONF_API_KEY],
config_entry.data[CONF_APP_KEY],
session=session,
),
)
hass.loop.create_task(ambient.ws_connect())
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = ambient
except WebsocketError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
async def _async_disconnect_websocket(*_):
await ambient.client.websocket.disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect_websocket)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Ambient PWS config entry."""
ambient = hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
hass.async_create_task(ambient.ws_disconnect())
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in ("binary_sensor", "sensor")
]
await asyncio.gather(*tasks)
return True
async def async_migrate_entry(hass, config_entry):
"""Migrate old entry."""
version = config_entry.version
_LOGGER.debug("Migrating from version %s", version)
# 1 -> 2: Unique ID format changed, so delete and re-import:
if version == 1:
dev_reg = await hass.helpers.device_registry.async_get_registry()
dev_reg.async_clear_config_entry(config_entry)
en_reg = await hass.helpers.entity_registry.async_get_registry()
en_reg.async_clear_config_entry(config_entry)
version = config_entry.version = 2
hass.config_entries.async_update_entry(config_entry)
_LOGGER.info("Migration to version %s successful", version)
return True
class AmbientStation:
"""Define a class to handle the Ambient websocket."""
def __init__(self, hass, config_entry, client):
"""Initialize."""
self._config_entry = config_entry
self._entry_setup_complete = False
self._hass = hass
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client = client
self.stations = {}
async def _attempt_connect(self):
"""Attempt to connect to the socket (retrying later on fail)."""
async def connect(timestamp=None):
"""Connect."""
await self.client.websocket.connect()
try:
await connect()
except WebsocketError as err:
_LOGGER.error("Error with the websocket connection: %s", err)
self._ws_reconnect_delay = min(2 * self._ws_reconnect_delay, 480)
async_call_later(self._hass, self._ws_reconnect_delay, connect)
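# Note: the reconnect delay doubles after every failed attempt, starting
# from DEFAULT_SOCKET_MIN_RETRY (15), so the waits are 30, 60, 120, 240
# and then 480 seconds, capped by the min(2 * delay, 480) expression above.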
async def ws_connect(self):
"""Register handlers and connect to the websocket."""
def on_connect():
"""Define a handler to fire when the websocket is connected."""
_LOGGER.info("Connected to websocket")
def on_data(data):
"""Define a handler to fire when the data is received."""
mac_address = data["macAddress"]
if data != self.stations[mac_address][ATTR_LAST_DATA]:
_LOGGER.debug("New data received: %s", data)
self.stations[mac_address][ATTR_LAST_DATA] = data
async_dispatcher_send(
self._hass, f"ambient_station_data_update_{mac_address}"
)
def on_disconnect():
"""Define a handler to fire when the websocket is disconnected."""
_LOGGER.info("Disconnected from websocket")
def on_subscribed(data):
"""Define a handler to fire when the subscription is set."""
for station in data["devices"]:
if station["macAddress"] in self.stations:
continue
_LOGGER.debug("New station subscription: %s", data)
# Only create entities based on the data coming through the socket.
# If the user is monitoring brightness (in W/m^2), make sure we also
# add a calculated sensor for the same data measured in lx:
monitored_conditions = [
k for k in station["lastData"] if k in SENSOR_TYPES
]
if TYPE_SOLARRADIATION in monitored_conditions:
monitored_conditions.append(TYPE_SOLARRADIATION_LX)
self.stations[station["macAddress"]] = {
ATTR_LAST_DATA: station["lastData"],
ATTR_LOCATION: station.get("info", {}).get("location"),
ATTR_MONITORED_CONDITIONS: monitored_conditions,
ATTR_NAME: station.get("info", {}).get(
"name", station["macAddress"]
),
}
# If the websocket disconnects and reconnects, the on_subscribed
# handler will get called again; in that case, we don't want to
# attempt forward setup of the config entry (because it will have
# already been done):
if not self._entry_setup_complete:
for component in ("binary_sensor", "sensor"):
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component
)
)
self._entry_setup_complete = True
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client.websocket.on_connect(on_connect)
self.client.websocket.on_data(on_data)
self.client.websocket.on_disconnect(on_disconnect)
self.client.websocket.on_subscribed(on_subscribed)
await self._attempt_connect()
async def ws_disconnect(self):
"""Disconnect from the websocket."""
await self.client.websocket.disconnect()
class AmbientWeatherEntity(Entity):
"""Define a base Ambient PWS entity."""
def __init__(
self, ambient, mac_address, station_name, sensor_type, sensor_name, device_class
):
"""Initialize the sensor."""
self._ambient = ambient
self._device_class = device_class
self._mac_address = mac_address
self._sensor_name = sensor_name
self._sensor_type = sensor_type
self._state = None
self._station_name = station_name
@property
def available(self):
"""Return True if entity is available."""
# Since the solarradiation_lx sensor is created only when a
# solarradiation sensor exists, report the solarradiation_lx
# sensor as available whenever the solarradiation sensor is
# available:
if self._sensor_type == TYPE_SOLARRADIATION_LX:
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
TYPE_SOLARRADIATION
)
is not None
)
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
self._sensor_type
)
is not None
)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._mac_address)},
"name": self._station_name,
"manufacturer": "Ambient Weather",
}
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._station_name}_{self._sensor_name}"
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}_{self._sensor_type}"
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.update_from_latest_data()
self.async_write_ha_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"ambient_station_data_update_{self._mac_address}", update
)
)
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
raise NotImplementedError
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output an tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
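# For example (shapes chosen for illustration): a_shape = [2, 3] and
# b_shape = [3, 5] with both transpose attrs False gives output [2, 5];
# with transpose_a=True (and transpose_b still False) the compatibility
# check instead compares a_shape[0] against b_shape[0].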
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
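# Worked example (values chosen for illustration): input_size=(10, 10),
# filter_size=(3, 3), strides=(2, 2) gives
#   VALID: ((10 - 3 + 2) // 2,) * 2 == (4, 4)
#   SAME:  ((10 + 2 - 1) // 2,) * 2 == (5, 5)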
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
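# Worked example of the depthwise branch above (values chosen for
# illustration): ksize = [1, 1, 1, 2] and strides = [1, 1, 1, 2] on an
# NHWC input of depth 8 leave rows/cols unchanged and produce an output
# depth of 8 // 2 == 4.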
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
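# For example (shapes chosen for illustration):
#   broadcast_shape(tensor_shape.TensorShape([3, 1, 5]),
#                   tensor_shape.TensorShape([2, 5]))
# pads the shorter shape with 1s on the left and returns TensorShape([3, 2, 5]).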
def call_cpp_shape_fn(op,
input_tensors_needed=None,
input_tensors_as_shapes_needed=None,
debug_python_shape_fn=None,
require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
input_tensors_needed: a list of input tensor indices for which to compute
the input tensor's value and pass to the C++ shape function.
input_tensors_as_shapes_needed: a list of input tensor indices for which to
compute the constant_value_as_shape and pass to the C++ shape function.
debug_python_shape_fn: For testing only during migration to using
call_cpp_shape_fn. Do not submit calls that set this,
as the comparison is slow. If non-None, the python shape function;
this function will be called and its output compared to that of
the C++ shape function.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
handle_shapes: A TensorShape list of the shapes for handle outputs, if
any.
handle_dtypes: A list of DataType enums for the handle outputs, if any.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
# To avoid serializing large constants, we special-case constant
# here, even though it has a C++ shape function. When Python
# calls the C / C-API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
"handle_dtypes": [types_pb2.DT_INVALID]
}
input_tensors_needed = input_tensors_needed or []
input_tensors_as_shapes_needed = input_tensors_as_shapes_needed or []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
def _call_cpp_shape_fn_impl(
op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn):
"""Core implementaton of call_cpp_shape_fn."""
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
r.handle_shape.CopyFrom(t._handle_shape)
r.handle_dtype = t._handle_dtype
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
input_tensors = [None for i in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
arr = [serialized_unknown_shape for i in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
with errors.raise_exception_on_not_ok_status() as status:
output = pywrap_tensorflow.RunCppShapeInference(
node_def_str, input_shapes, input_tensors, input_tensors_as_shapes,
status)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_shapes = [r.handle_shape for r in result_protos]
result_handle_dtypes = [r.handle_dtype for r in result_protos]
if debug_python_shape_fn:
try:
python_result = [tensor_shape.as_shape(s)
for s in debug_python_shape_fn(op)]
except Exception as err:
raise AssertionError("Python shape function return error but "
"C++ shape functon did not: %s" % str(err))
result_as_shapes = [tensor_shape.as_shape(s) for s in result]
if str(result_as_shapes) != str(python_result):
raise ValueError(
("Python vs CPP shape mismatch. "
"CPP: %s vs python: %s on node %s "
"with input shapes %s") % (
str(result_as_shapes), str(python_result), str(op.node_def),
",".join([str(i.get_shape()) for i in op.inputs])))
return {"shapes": result,
"handle_shapes": result_handle_shapes,
"handle_dtypes": result_handle_dtypes,
"inputs_needed": output[-1]}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
|
|
"""
Instead of the change detector looking at each revision for an item
what i want here, is to compare the current state of an item's key/value pairs that I define, with another
set of data (a reference dataset, from an owl/obographs json file)
Steps:
- Does a sparql query against wikidata to get all mesh IDs on all items with a DOID. Looks for a mapping relation type (P4390)
if available. If no mapping rel type is specified, default to oboInOwl:hasDbXref
- Sparql query against the latest doid.owl release file looking for mesh terms using the relations:
{skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref}
- Compare the mesh IDs on wd vs whats in DO. Returns a table listing all of the differences
"""
import subprocess
from collections import defaultdict
import pandas as pd
import requests
from rdflib import Graph
from rdflib import URIRef, Literal
from tqdm import tqdm
from wikidataintegrator.wdi_core import WDItemEngine
from wikidataintegrator.wdi_helpers import id_mapper
BIOPORTAL_KEY = "a1ac23bb-23cb-44cf-bf5e-bcdd7446ef37"
DOID_QID = id_mapper("P699")
DO_OWL_PATH = "doid.owl"
QID_MAP_REL_TYPE_CURIE = {'Q39893184': 'skos:closeMatch',
'Q39893967': 'skos:narrowMatch',
'Q39894595': 'skos:broadMatch',
'Q39894604': 'skos:relatedMatch',
'Q39893449': 'skos:exactMatch'}
QID_MAP_REL_TYPE_CURIE = defaultdict(lambda: "oboInOwl:hasDbXref", QID_MAP_REL_TYPE_CURIE)
"""
MAP_REL_TYPE_QID = {'http://www.w3.org/2004/02/skos/core#broadMatch': 'Q39894595',
'http://www.w3.org/2004/02/skos/core#closeMatch': 'Q39893184',
'http://www.w3.org/2004/02/skos/core#exactMatch': 'Q39893449',
'http://www.w3.org/2004/02/skos/core#narrowMatch': 'Q39893967',
'http://www.w3.org/2004/02/skos/core#relatedMatch': 'Q39894604'}
"""
PREFIX_TO_CURIE = {
'http://www.w3.org/2004/02/skos/core#': 'skos',
'http://www.geneontology.org/formats/oboInOwl#': 'oboInOwl'
}
purl_to_curie = lambda s: s.replace("http://purl.obolibrary.org/obo/", "").replace("_", ":")
curie_to_purl = lambda s: "http://purl.obolibrary.org/obo/" + s.replace(":", "_")
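# e.g. purl_to_curie("http://purl.obolibrary.org/obo/DOID_0050856") -> "DOID:0050856"
# and curie_to_purl("DOID:0050856") -> "http://purl.obolibrary.org/obo/DOID_0050856"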
def get_wikidata_do_mesh():
# get mesh xrefs, including the mapping relation type
# {'DOID:0050856': {'skos:broadMatch_D019958'}}
query = """
select ?item ?doid ?mesh ?mesh_rt where {
?item wdt:P699 ?doid .
?item p:P486 ?mesh_s .
?mesh_s ps:P486 ?mesh .
optional { ?mesh_s pq:P4390 ?mesh_rt }
}"""
results = WDItemEngine.execute_sparql_query(query)['results']['bindings']
results = [{k: v['value'].replace("http://www.wikidata.org/entity/", "") for k, v in item.items()} for item in
results]
df = pd.DataFrame(results)
df['mesh_rt'] = df.apply(lambda row: QID_MAP_REL_TYPE_CURIE[row.mesh_rt] + "_MESH:" + row.mesh, axis=1)
df['_item'] = df['item']
r = df.groupby("_item").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records")
wd = {list(x['doid'])[0]: x for x in r}
wd = {k: v['mesh_rt'] for k, v in wd.items()}
wd = {k: v for k, v in wd.items() if v}
return wd
def getConceptLabel(qid):
return getConceptLabels((qid,))[qid]
def getConceptLabels(qids):
qids = "|".join({qid.replace("wd:", "") if qid.startswith("wd:") else qid for qid in qids})
params = {'action': 'wbgetentities', 'ids': qids, 'languages': 'en', 'format': 'json', 'props': 'labels'}
r = requests.get("https://www.wikidata.org/w/api.php", params=params)
print(r.url)
r.raise_for_status()
wd = r.json()['entities']
return {k: v['labels']['en']['value'] for k, v in wd.items()}
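# Illustrative call (the QID and label are assumptions, shown only to document the return shape):
#   getConceptLabels(["Q12136"]) -> {"Q12136": "disease"}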
def get_do_metadata():
# from the do owl file, get do labels, descriptions
g = Graph()
g.parse(DO_OWL_PATH)
disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string'))
query = """
SELECT * WHERE {
?id oboInOwl:hasOBONamespace ?disease_ontology .
?id rdfs:label ?label .
OPTIONAL {?id obo:IAO_0000115 ?descr}
FILTER NOT EXISTS {?id owl:deprecated ?dep}
}
"""
rows = g.query(query, initBindings={'disease_ontology': disease_ontology})
res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings]
df = pd.DataFrame(res)
df.drop_duplicates(subset=['id'], inplace=True)
df.fillna("", inplace=True)
do = df.to_dict("records")
do = {purl_to_curie(x['id']): x for x in do}
return do
def parse_do_owl():
"""
Parse xrefs and skos matches from owl file.
Returns dict. key: doid curie, value: set of xrefs in the format: relation type + "_" + xref. (ex: oboInOwl:hasDbXref_MESH:D007690)
:return:
"""
g = Graph()
g.parse(DO_OWL_PATH)
disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string'))
true = Literal('true', datatype=URIRef('http://www.w3.org/2001/XMLSchema#boolean'))
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?id ?rel_type ?xref WHERE {
?id oboInOwl:hasOBONamespace ?disease_ontology .
OPTIONAL {
values ?rel_type {skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref}
?id ?rel_type ?xref .
}
FILTER NOT EXISTS {?id owl:deprecated ?true}
}
"""
rows = g.query(query, initBindings={'disease_ontology': disease_ontology, 'true': true})
res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings]
df = pd.DataFrame(res)
df["doid"] = df["id"]
df.dropna(subset=['xref'], inplace=True)
df.rel_type = df.rel_type.apply(
lambda x: x.replace(x.split("#")[0] + "#", PREFIX_TO_CURIE[x.split("#")[0] + "#"] + ":"))
df.xref = df.apply(lambda row: row.rel_type + "_" + row.xref, axis=1)
r = df.groupby("id").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records")
do = {purl_to_curie(list(x['doid'])[0]): x for x in r}
do = {k: v['xref'] for k, v in do.items()}
# filter mesh xrefs only
do = {k: set([x for x in v if "MESH:" in x]) for k, v in do.items()}
do = {k: v for k, v in do.items() if v}
# do['DOID:5570']
return do
def compare(wd, do):
# for each DO item, does wd have everything it should? What else does it have?
wd = defaultdict(set, wd)
do = defaultdict(set, do)
leftover_in_wd = dict()
leftover_in_do = dict()
doids = set(wd.keys()) | set(do.keys())
missing = []
for doid in doids:
leftover_in_wd[doid] = set()
leftover_in_do[doid] = set()
if doid not in wd:
missing.append(doid)
continue
leftover_in_wd[doid] = wd[doid] - do[doid]
leftover_in_do[doid] = do[doid] - wd[doid]
leftover_in_wd = {k: v for k, v in leftover_in_wd.items() if v}
leftover_in_do = {k: v for k, v in leftover_in_do.items() if v}
print("Items missing in wikidata: {}".format(missing))
return leftover_in_wd, leftover_in_do
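# Illustrative example of compare() with hypothetical data (not real DOID/MeSH mappings):
#   wd = {"DOID:1234": {"oboInOwl:hasDbXref_MESH:D000001", "skos:exactMatch_MESH:D000002"}}
#   do = {"DOID:1234": {"oboInOwl:hasDbXref_MESH:D000001"}}
#   compare(wd, do) -> ({"DOID:1234": {"skos:exactMatch_MESH:D000002"}}, {})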
def get_changes():
wd = get_wikidata_do_mesh()
do = parse_do_owl()
leftover_in_wd, leftover_in_do = compare(wd, do)
return leftover_in_wd, leftover_in_do
def get_mesh_info(mesh_id):
url = "http://data.bioontology.org/ontologies/MESH/classes/http%3A%2F%2Fpurl.bioontology.org%2Fontology%2FMESH%2F{}"
d = requests.get(url.format(mesh_id), params={'apikey': BIOPORTAL_KEY}).json()
if "errors" in d:
return {'mesh_label': '', 'mesh_descr': ''}
d = {'mesh_label': d['prefLabel'], 'mesh_descr': d['definition'], 'mesh_synonyms': ";".join(d['synonym'])}
d['mesh_descr'] = d['mesh_descr'][0] if d['mesh_descr'] else ''
return d
def get_mesh_changes(leftover_in_wd):
# from the things added to wikidata, make a table with the metadata about the change
# starting with things added to wd
mesh_info = []
mesh_url = "https://meshb.nlm.nih.gov/record/ui?ui={}"
do_metadata = get_do_metadata()
for doid, meshs in tqdm(leftover_in_wd.items()):
for mesh in meshs:
relation, mesh = mesh.split("_")
mesh = mesh.split(":")[1]
qid = DOID_QID[doid]
do_node = do_metadata.get(doid, dict())
x = {'qid': qid, 'wd_label': getConceptLabel(qid),
'doid': doid, 'do_label': do_node.get("label"), 'doid_url': curie_to_purl(doid),
'do_def': do_node.get("descr"),
'mesh': mesh, 'mesh_url': mesh_url.format(mesh),
'relation': relation}
x.update(get_mesh_info(mesh))
mesh_info.append(x)
df = pd.DataFrame(mesh_info)
df = df[['doid', 'do_label', 'do_def', 'doid_url', 'mesh', 'mesh_label',
'mesh_descr', 'mesh_synonyms', 'mesh_url', 'qid', 'wd_label', 'relation']]
print(df.head(2))
remove_me = df[df.mesh_label.isnull()]
if not remove_me.empty:
print("you should remove these")
print(remove_me)
# make a formatted df
df_fmt = df.copy()
df_fmt.doid = df_fmt.apply(lambda x: "[" + x.doid + "](" + x.doid_url + ")", 1)
del df_fmt['doid_url']
df_fmt.mesh = df_fmt.apply(lambda x: "[" + x.mesh + "](" + x.mesh_url + ")", 1)
del df_fmt['mesh_url']
df_fmt.qid = df_fmt.qid.apply(lambda x: "[" + x + "](https://www.wikidata.org/wiki/" + x + ")")
return df, df_fmt
def download_do_owl(release):
url = "https://github.com/DiseaseOntology/HumanDiseaseOntology/raw/master/src/ontology/releases/{}/doid.owl"
subprocess.check_call(["wget", "-N", url.format(release)])
def main(release):
# release = "2017-11-28"
download_do_owl(release)
leftover_in_wd, leftover_in_do = get_changes()
df, df_fmt = get_mesh_changes(leftover_in_wd)
return df, df_fmt
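# Minimal usage sketch (assumed entry point, not part of the original script; the release date
# matches the example above and the output file name is only illustrative):
if __name__ == "__main__":
    changes_df, changes_df_fmt = main("2017-11-28")
    changes_df_fmt.to_csv("doid_mesh_changes.csv", index=False)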
|
|
from __future__ import unicode_literals
from django.template import Context, Template, TemplateSyntaxError
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .templatetags import custom, inclusion
class CustomFilterTests(SimpleTestCase):
def test_filter(self):
t = Template("{% load custom %}{{ string|trim:5 }}")
self.assertEqual(
t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})),
"abcde"
)
class TagTestCase(SimpleTestCase):
def verify_tag(self, tag, name):
self.assertEqual(tag.__name__, name)
self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name)
self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name)
class SimpleTagTests(TagTestCase):
def test_simple_tags(self):
c = Context({'value': 42})
templates = [
('{% load custom %}{% no_params %}', 'no_params - Expected result'),
('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'),
('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'),
('{% load custom %}{% no_params_with_context %}',
'no_params_with_context - Expected result (context value: 42)'),
('{% load custom %}{% params_and_context 37 %}',
'params_and_context - Expected result (context value: 42): 37'),
('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'),
('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'),
('{% load custom %}{% simple_one_default 37 two="hello" %}',
'simple_one_default - Expected result: 37, hello'),
('{% load custom %}{% simple_one_default one=99 two="hello" %}',
'simple_one_default - Expected result: 99, hello'),
('{% load custom %}{% simple_one_default 37 42 %}',
'simple_one_default - Expected result: 37, 42'),
('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'),
('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}',
'simple_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '),
('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}',
'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'),
]
for entry in templates:
t = Template(entry[0])
self.assertEqual(t.render(c), entry[1])
for entry in templates:
t = Template("%s as var %%}Result: {{ var }}" % entry[0][0:-2])
self.assertEqual(t.render(c), "Result: %s" % entry[1])
def test_simple_tag_errors(self):
errors = [
("'simple_one_default' received unexpected keyword argument 'three'",
'{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'),
("'simple_two_params' received too many positional arguments",
'{% load custom %}{% simple_two_params 37 42 56 %}'),
("'simple_one_default' received too many positional arguments",
'{% load custom %}{% simple_one_default 37 42 56 %}'),
("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)",
'{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'),
("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
Template(entry[1])
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
Template("%s as var %%}" % entry[1][0:-2])
def test_simple_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.no_params, 'no_params')
self.verify_tag(custom.one_param, 'one_param')
self.verify_tag(custom.explicit_no_context, 'explicit_no_context')
self.verify_tag(custom.no_params_with_context, 'no_params_with_context')
self.verify_tag(custom.params_and_context, 'params_and_context')
self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs')
self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter')
def test_simple_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'simple_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template('{% load custom %}{% simple_tag_without_context_parameter 123 %}')
class InclusionTagTests(TagTestCase):
def test_inclusion_tags(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context 37 %}',
'inclusion_explicit_no_context - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context %}',
'inclusion_no_params_with_context - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context 37 %}',
'inclusion_params_and_context - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params 37 42 %}',
'inclusion_two_params - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default 37 %}', 'inclusion_one_default - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
'inclusion_one_default - Expected result: 37, hello\n'),
('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
'inclusion_one_default - Expected result: 99, hello\n'),
('{% load inclusion %}{% inclusion_one_default 37 42 %}',
'inclusion_one_default - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 %}',
'inclusion_unlimited_args - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}',
'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args %}',
'inclusion_only_unlimited_args - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}',
'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'),
]
for entry in templates:
t = Template(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_errors(self):
errors = [
("'inclusion_one_default' received unexpected keyword argument 'three'",
'{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'),
("'inclusion_two_params' received too many positional arguments",
'{% load inclusion %}{% inclusion_two_params 37 42 56 %}'),
("'inclusion_one_default' received too many positional arguments",
'{% load inclusion %}{% inclusion_one_default 37 42 56 %}'),
("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_one_default %}'),
("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_unlimited_args %}'),
(
"'inclusion_unlimited_args_kwargs' received some positional argument(s) "
"after some keyword argument(s)",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}',
),
("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
Template(entry[1])
def test_include_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'inclusion_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}')
def test_inclusion_tags_from_template(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params_from_template %}',
'inclusion_no_params_from_template - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param_from_template 37 %}',
'inclusion_one_param_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}',
'inclusion_explicit_no_context_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}',
'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}',
'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}',
'inclusion_two_params_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 %}',
'inclusion_one_default_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}',
'inclusion_one_default_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}',
'inclusion_only_unlimited_args_from_template - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
]
for entry in templates:
t = Template(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params')
self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param')
self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context')
self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context')
self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context')
self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params')
self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default')
self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args')
self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args')
self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter')
self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n')
self.verify_tag(inclusion.inclusion_tag_current_app, 'inclusion_tag_current_app')
self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_15070_current_app(self):
"""
Test that inclusion tag passes down `current_app` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = Template('{% load inclusion %}{% inclusion_tag_current_app %}')
self.assertEqual(t.render(c).strip(), 'None')
# That part produces the deprecation warning
c = Context({}, current_app='advanced')
self.assertEqual(t.render(c).strip(), 'advanced')
def test_15070_use_l10n(self):
"""
Test that inclusion tag passes down `use_l10n` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = Template('{% load inclusion %}{% inclusion_tag_use_l10n %}')
self.assertEqual(t.render(c).strip(), 'None')
c.use_l10n = True
self.assertEqual(t.render(c).strip(), 'True')
class AssignmentTagTests(TagTestCase):
def test_assignment_tags(self):
c = Context({'value': 42})
t = Template('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}')
self.assertEqual(t.render(c), 'The result is: assignment_no_params - Expected result')
def test_assignment_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.assignment_no_params, 'assignment_no_params')
def test_assignment_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'assignment_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template('{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Rally command: info
Samples:
$ rally info find create_meter_and_get_stats
CeilometerStats.create_meter_and_get_stats (benchmark scenario).
Test creating a meter and fetching its statistics.
Meter is first created and then statistics is fetched for the same
using GET /v2/meters/(meter_name)/statistics.
Parameters:
- name_length: length of generated (random) part of meter name
- kwargs: contains optional arguments to create a meter
$ rally info find Authenticate
Authenticate (benchmark scenario group).
This class should contain authentication mechanism.
Benchmark scenarios:
---------------------------------------------------------
Name Description
---------------------------------------------------------
Authenticate.keystone
Authenticate.validate_cinder Check Cinder Client ...
Authenticate.validate_glance Check Glance Client ...
Authenticate.validate_heat Check Heat Client ...
$ rally info find some_non_existing_benchmark
Failed to find any docs for query: 'some_non_existing_benchmark'
"""
from __future__ import print_function
from rally.benchmark.scenarios import base as scenario_base
from rally.benchmark import sla
from rally.cli import cliutils
from rally.common import utils
from rally import deploy
from rally.deploy import serverprovider
from rally import exceptions
class InfoCommands(object):
"""This command allows you to get quick doc of some rally entities.
Available for scenario groups, scenarios, SLA, deploy engines and
server providers.
Usage:
$ rally info find <query>
To get information about main concepts of Rally as well as to list entities
you can query docs for, type one of the following:
$ rally info BenchmarkScenarios
$ rally info SLA
$ rally info DeploymentEngines
$ rally info ServerProviders
"""
@cliutils.args("--query", dest="query", type=str, help="Search query.")
def find(self, query):
"""Search for an entity that matches the query and print info about it.
:param query: search query.
"""
info = self._find_info(query)
if info:
print(info)
else:
substitutions = self._find_substitution(query)
if len(substitutions) == 1:
print(self._find_info(substitutions[0]))
else:
print("Failed to find any docs for query: '%s'" % query)
if substitutions:
print("Did you mean one of these?\n\t%s" %
"\n\t".join(substitutions))
return 1
def list(self):
"""List main entities in Rally for which rally info find works.
Lists benchmark scenario groups, deploy engines and server providers.
"""
self.BenchmarkScenarios()
self.SLA()
self.DeploymentEngines()
self.ServerProviders()
def BenchmarkScenarios(self):
"""Get information about benchmark scenarios available in Rally."""
def scenarios_filter(scenario_cls):
return any(scenario_base.Scenario.is_scenario(scenario_cls, m)
for m in dir(scenario_cls))
scenarios = self._get_descriptions(scenario_base.Scenario,
scenarios_filter)
info = (self._make_header("Rally - Benchmark scenarios") +
"\n\n"
"Benchmark scenarios are what Rally actually uses to test "
"the performance of an OpenStack deployment.\nEach Benchmark "
"scenario implements a sequence of atomic operations "
"(server calls) to simulate\ninteresing user/operator/"
"client activity in some typical use case, usually that of "
"a specific OpenStack\nproject. Iterative execution of this "
"sequence produces some kind of load on the target cloud.\n"
"Benchmark scenarios play the role of building blocks in "
"benchmark task configuration files."
"\n\n"
"Scenarios in Rally are put together in groups. Each "
"scenario group is concentrated on some specific \nOpenStack "
"functionality. For example, the 'NovaServers' scenario "
"group contains scenarios that employ\nseveral basic "
"operations available in Nova."
"\n\n" +
self._compose_table("List of Benchmark scenario groups",
scenarios) +
"To get information about benchmark scenarios inside "
"each scenario group, run:\n"
" $ rally info find <ScenarioGroupName>\n\n")
print(info)
def SLA(self):
"""Get information about SLA available in Rally."""
sla_descrs = self._get_descriptions(sla.SLA)
# NOTE(msdubov): Add config option names to the "Name" column
for i in range(len(sla_descrs)):
description = sla_descrs[i]
sla_cls = sla.SLA.get(description[0])
sla_descrs[i] = (sla_cls.get_name(), description[1])
info = (self._make_header("Rally - SLA checks "
"(Service-Level Agreements)") +
"\n\n"
"SLA in Rally enable quick and easy checks of "
"whether the results of a particular\nbenchmark task have "
"passed certain success criteria."
"\n\n"
"SLA checks can be configured in the 'sla' section of "
"benchmark task configuration\nfiles, used to launch new "
"tasks by the 'rally task start <config_file>' command.\n"
"For each SLA check you would like to use, you should put "
"its name as a key and the\ntarget check parameter as an "
"assosiated value, e.g.:\n\n"
" sla:\n"
" max_seconds_per_iteration: 4\n"
" failure_rate:\n"
" max: 1"
"\n\n" +
self._compose_table("List of SLA checks", sla_descrs) +
"To get information about specific SLA checks, run:\n"
" $ rally info find <sla_check_name>\n")
print(info)
def DeploymentEngines(self):
"""Get information about deploy engines available in Rally."""
engines = self._get_descriptions(deploy.EngineFactory)
info = (self._make_header("Rally - Deployment engines") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally. Deployment engines in Rally are essentially "
"plugins that control the\nprocess of deploying some "
"OpenStack distribution, say, with DevStack or FUEL, and\n"
"register these deployments in Rally before any benchmarking "
"procedures against them\ncan take place."
"\n\n"
"A typical use case in Rally would be when you first "
"register a deployment using the\n'rally deployment create' "
"command and then reference this deployment by uuid "
"when\nstarting a benchmark task with 'rally task start'. "
"The 'rally deployment create'\ncommand awaits a deployment "
"configuration file as its parameter. This file may look "
"like:\n"
"{\n"
" \"type\": \"ExistingCloud\",\n"
" \"auth_url\": \"http://example.net:5000/v2.0/\",\n"
" \"admin\": { <credentials> },\n"
" ...\n"
"}"
"\n\n" +
self._compose_table("List of Deployment engines", engines) +
"To get information about specific Deployment engines, run:\n"
" $ rally info find <DeploymentEngineName>\n")
print(info)
def ServerProviders(self):
"""Get information about server providers available in Rally."""
providers = self._get_descriptions(serverprovider.ProviderFactory)
info = (self._make_header("Rally - Server providers") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally with one of the Deployment engines. These "
"deployment engines, in turn,\nmay need Server "
"providers to manage virtual machines used for "
"OpenStack deployment\nand its following benchmarking. The "
"key feature of server providers is that they\nprovide a "
"unified interface for interacting with different "
"virtualization\ntechnologies (LXS, Virsh etc.)."
"\n\n"
"Server providers are usually referenced in deployment "
"configuration files\npassed to the 'rally deployment create'"
" command, e.g.:\n"
"{\n"
" \"type\": \"DevstackEngine\",\n"
" \"provider\": {\n"
" \"type\": \"ExistingServers\",\n"
" \"credentials\": [{\"user\": \"root\",\n"
" \"host\": \"10.2.0.8\"}]\n"
" }\n"
"}"
"\n\n" +
self._compose_table("List of Server providers", providers) +
"To get information about specific Server providers, run:\n"
" $ rally info find <ServerProviderName>\n")
print(info)
def _get_descriptions(self, base_cls, subclass_filter=None):
descriptions = []
subclasses = utils.itersubclasses(base_cls)
if subclass_filter:
subclasses = filter(subclass_filter, subclasses)
for entity in subclasses:
name = entity.get_name()
doc = utils.parse_docstring(entity.__doc__)
description = doc["short_description"] or ""
descriptions.append((name, description))
descriptions.sort(key=lambda d: d[0])
return descriptions
def _find_info(self, query):
return (self._get_scenario_group_info(query) or
self._get_scenario_info(query) or
self._get_sla_info(query) or
self._get_deploy_engine_info(query) or
self._get_server_provider_info(query))
def _find_substitution(self, query):
max_distance = min(3, len(query) / 4)
scenarios = scenario_base.Scenario.list_benchmark_scenarios()
scenario_groups = list(set(s.split(".")[0] for s in scenarios))
scenario_methods = list(set(s.split(".")[1] for s in scenarios))
sla_info = [cls.get_name() for cls in sla.SLA.get_all()]
deploy_engines = [cls.get_name() for cls in
deploy.EngineFactory.get_all()]
server_providers = [cls.get_name() for cls in
serverprovider.ProviderFactory.get_all()]
candidates = (scenarios + scenario_groups + scenario_methods +
sla_info + deploy_engines + server_providers)
suggestions = []
# NOTE(msdubov): Incorrect query may either have typos or be truncated.
for candidate in candidates:
if ((utils.distance(query, candidate) <= max_distance or
candidate.startswith(query))):
suggestions.append(candidate)
return suggestions
def _get_scenario_group_info(self, query):
try:
scenario_group = scenario_base.Scenario.get_by_name(query)
if not any(scenario_base.Scenario.is_scenario(scenario_group, m)
for m in dir(scenario_group)):
return None
info = self._make_header("%s (benchmark scenario group)" %
scenario_group.get_name())
info += "\n\n"
info += utils.format_docstring(scenario_group.__doc__)
scenarios = scenario_group.list_benchmark_scenarios()
descriptions = []
for scenario_name in scenarios:
cls, method_name = scenario_name.split(".")
if hasattr(scenario_group, method_name):
scenario = getattr(scenario_group, method_name)
doc = utils.parse_docstring(scenario.__doc__)
descr = doc["short_description"] or ""
descriptions.append((scenario_name, descr))
info += self._compose_table("Benchmark scenarios", descriptions)
return info
except exceptions.NoSuchScenario:
return None
def _get_scenario_info(self, query):
try:
scenario = scenario_base.Scenario.get_scenario_by_name(query)
scenario_group_name = utils.get_method_class(scenario).get_name()
header = ("%(scenario_group)s.%(scenario_name)s "
"(benchmark scenario)" %
{"scenario_group": scenario_group_name,
"scenario_name": scenario.__name__})
info = self._make_header(header)
info += "\n\n"
doc = utils.parse_docstring(scenario.__doc__)
if not doc["short_description"]:
return None
info += doc["short_description"] + "\n\n"
if doc["long_description"]:
info += doc["long_description"] + "\n\n"
if doc["params"]:
info += "Parameters:\n"
for param in doc["params"]:
info += " - %(name)s: %(doc)s" % param + "\n"
if doc["returns"]:
info += "Returns: %s" % doc["returns"]
return info
except exceptions.NoSuchScenario:
return None
def _get_sla_info(self, query):
try:
found_sla = sla.SLA.get(query)
header = "%s (SLA)" % found_sla.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(found_sla.__doc__) + "\n"
return info
except exceptions.PluginNotFound:
return None
def _get_deploy_engine_info(self, query):
try:
deploy_engine = deploy.EngineFactory.get(query)
header = "%s (deploy engine)" % deploy_engine.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(deploy_engine.__doc__)
return info
except exceptions.PluginNotFound:
return None
def _get_server_provider_info(self, query):
try:
server_provider = serverprovider.ProviderFactory.get(query)
header = "%s (server provider)" % server_provider.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(server_provider.__doc__)
return info
except exceptions.PluginNotFound:
return None
def _make_header(self, string):
header = "-" * (len(string) + 2) + "\n"
header += " " + string + " \n"
header += "-" * (len(string) + 2)
return header
def _compose_table(self, title, descriptions):
table = " " + title + ":\n"
len0 = lambda x: len(x[0])
len1 = lambda x: len(x[1])
first_column_len = max(map(len0, descriptions)) + cliutils.MARGIN
second_column_len = max(map(len1, descriptions)) + cliutils.MARGIN
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += (" Name" + " " * (first_column_len - len("Name")) +
"Description\n")
table += "-" * (first_column_len + second_column_len + 1) + "\n"
for (name, descr) in descriptions:
table += " " + name
table += " " * (first_column_len - len(name))
table += descr + "\n"
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += "\n"
return table
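# Minimal usage sketch (illustrative only; in practice these methods are dispatched through the
# rally CLI, e.g. "rally info find <query>"):
#   info_cmd = InfoCommands()
#   info_cmd.find("Authenticate")   # prints docs for the Authenticate scenario group
#   info_cmd.list()                 # prints all entities that "rally info find" can query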
|
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import operator
from decimal import Decimal
import numpy as np
import pytest
from pandas import Series, Timestamp, Timedelta, Period, NaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas.util.testing as tm
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Comparisons
class TestSeriesComparison(object):
def test_compare_invalid(self):
# GH#8058
# ops testing
a = pd.Series(np.random.randn(5), name=0)
b = pd.Series(np.random.randn(5))
b.name = pd.Timestamp('2000-01-01')
tm.assert_series_equal(a / b, 1 / (b / a))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt])
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('baz', 'baz', 'baz')])
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range('1949-06-07 03:00:00',
freq='H', periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize('US/Central')
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype('category')
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
class TestTimestampSeriesComparison(object):
def test_dt64_ser_cmp_date_warning(self):
# https://github.com/pandas-dev/pandas/issues/21359
        # Remove this test and enable the invalid test below
ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
date = ser.iloc[0].to_pydatetime().date()
with tm.assert_produces_warning(FutureWarning) as m:
result = ser == date
expected = pd.Series([True] + [False] * 9, name='dates')
tm.assert_series_equal(result, expected)
assert "Comparing Series of datetimes " in str(m[0].message)
assert "will not compare equal" in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
result = ser != date
tm.assert_series_equal(result, ~expected)
assert "will not compare equal" in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
result = ser <= date
tm.assert_series_equal(result, expected)
assert "a TypeError will be raised" in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
result = ser < date
tm.assert_series_equal(result, pd.Series([False] * 10, name='dates'))
assert "a TypeError will be raised" in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
result = ser >= date
tm.assert_series_equal(result, pd.Series([True] * 10, name='dates'))
assert "a TypeError will be raised" in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
result = ser > date
tm.assert_series_equal(result, pd.Series([False] + [True] * 9,
name='dates'))
assert "a TypeError will be raised" in str(m[0].message)
@pytest.mark.skip(reason="GH-21359")
def test_dt64ser_cmp_date_invalid(self):
# GH#19800 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
date = ser.iloc[0].to_pydatetime().date()
assert not (ser == date).any()
assert (ser != date).all()
with pytest.raises(TypeError):
ser > date
with pytest.raises(TypeError):
ser < date
with pytest.raises(TypeError):
ser >= date
with pytest.raises(TypeError):
ser <= date
def test_dt64ser_cmp_period_scalar(self):
ser = Series(pd.period_range('2000-01-01', periods=10, freq='D'))
val = Period('2000-01-04', freq='D')
result = ser > val
expected = Series([x > val for x in ser])
tm.assert_series_equal(result, expected)
val = ser[5]
result = ser > val
expected = Series([x > val for x in ser])
tm.assert_series_equal(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
s_nat = ser.copy(deep=True)
ser[0] = pd.Timestamp('nat')
ser[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(ser, pd.Timestamp('20010109'))
result = right_f(pd.Timestamp('20010109'), ser)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(ser, pd.Timestamp('nat'))
result = right_f(pd.Timestamp('nat'), ser)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, pd.Timestamp('20010109'))
result = right_f(pd.Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, pd.Timestamp('nat'))
result = right_f(pd.Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
def test_timestamp_equality(self):
# GH#11034
ser = pd.Series([pd.Timestamp('2000-01-29 01:59:00'), 'NaT'])
result = ser != ser
tm.assert_series_equal(result, pd.Series([False, True]))
result = ser != ser[0]
tm.assert_series_equal(result, pd.Series([False, True]))
result = ser != ser[1]
tm.assert_series_equal(result, pd.Series([True, True]))
result = ser == ser
tm.assert_series_equal(result, pd.Series([True, False]))
result = ser == ser[0]
tm.assert_series_equal(result, pd.Series([True, False]))
result = ser == ser[1]
tm.assert_series_equal(result, pd.Series([False, False]))
class TestTimedeltaSeriesComparisons(object):
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
class TestPeriodSeriesComparisons(object):
@pytest.mark.parametrize('freq', ['M', '2M', '3M'])
def test_cmp_series_period_scalar(self, freq):
# GH 13200
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
p = Period('2011-02', freq=freq)
exp = Series([False, True, False, False])
tm.assert_series_equal(base == p, exp)
tm.assert_series_equal(p == base, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base != p, exp)
tm.assert_series_equal(p != base, exp)
exp = Series([False, False, True, True])
tm.assert_series_equal(base > p, exp)
tm.assert_series_equal(p < base, exp)
exp = Series([True, False, False, False])
tm.assert_series_equal(base < p, exp)
tm.assert_series_equal(p > base, exp)
exp = Series([False, True, True, True])
tm.assert_series_equal(base >= p, exp)
tm.assert_series_equal(p <= base, exp)
exp = Series([True, True, False, False])
tm.assert_series_equal(base <= p, exp)
tm.assert_series_equal(p >= base, exp)
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assert_raises_regex(IncompatibleFrequency, msg):
base <= Period('2011', freq='A')
with tm.assert_raises_regex(IncompatibleFrequency, msg):
Period('2011', freq='A') >= base
@pytest.mark.parametrize('freq', ['M', '2M', '3M'])
def test_cmp_series_period_series(self, freq):
# GH#13200
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
ser = Series([Period(x, freq=freq) for x in
['2011-02', '2011-01', '2011-03', '2011-05']])
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
ser2 = Series([Period(x, freq='A') for x in
['2011', '2011', '2011', '2011']])
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assert_raises_regex(IncompatibleFrequency, msg):
base <= ser2
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series([Period('2011', freq='A'),
Period('2011-02', freq='M'),
Period('2013', freq='A'),
Period('2011-04', freq='M')])
ser = Series([Period('2012', freq='A'),
Period('2011-01', freq='M'),
Period('2013', freq='A'),
Period('2011-05', freq='M')])
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestSeriesDivision(object):
# __div__, __rdiv__, __floordiv__, __rfloordiv__
# for non-timestamp/timedelta/period dtypes
def test_divide_decimal(self):
# resolves issue GH#9787
expected = Series([Decimal(5)])
ser = Series([Decimal(10)])
result = ser / Decimal(2)
tm.assert_series_equal(result, expected)
ser = Series([Decimal(10)])
result = ser // Decimal(2)
tm.assert_series_equal(result, expected)
def test_div_equiv_binop(self):
# Test Series.div as well as Series.__div__
# float/integer issue
# GH#7785
first = Series([1, 0], name='first')
second = Series([-0.01, -0.02], name='second')
expected = Series([-0.01, -np.inf])
result = second.div(first)
tm.assert_series_equal(result, expected, check_names=False)
result = second / first
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype2', [
np.int64, np.int32, np.int16, np.int8,
np.float64, np.float32, np.float16,
np.uint64, np.uint32, np.uint16, np.uint8])
@pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
def test_ser_div_ser(self, dtype1, dtype2):
# no longer do integer div for any ops, but deal with the 0's
first = Series([3, 4, 5, 8], name='first').astype(dtype1)
second = Series([0, 0, 0, 3], name='second').astype(dtype2)
with np.errstate(all='ignore'):
expected = Series(first.values.astype(np.float64) / second.values,
dtype='float64', name=None)
expected.iloc[0:3] = np.inf
result = first / second
tm.assert_series_equal(result, expected)
assert not result.equals(second / first)
def test_rdiv_zero_compat(self):
# GH#8674
zero_array = np.array([0] * 5)
data = np.random.randn(5)
expected = Series([0.] * 5)
result = zero_array / Series(data)
tm.assert_series_equal(result, expected)
result = Series(zero_array) / data
tm.assert_series_equal(result, expected)
result = Series(zero_array) / Series(data)
tm.assert_series_equal(result, expected)
def test_div_zero_inf_signs(self):
# GH#9144, inf signing
ser = Series([-1, 0, 1], name='first')
expected = Series([-np.inf, np.nan, np.inf], name='first')
result = ser / 0
tm.assert_series_equal(result, expected)
def test_rdiv_zero(self):
# GH#9144
ser = Series([-1, 0, 1], name='first')
expected = Series([0.0, np.nan, 0.0], name='first')
result = 0 / ser
tm.assert_series_equal(result, expected)
def test_floordiv_div(self):
# GH#9144
ser = Series([-1, 0, 1], name='first')
result = ser // 0
expected = Series([-np.inf, np.nan, np.inf], name='first')
tm.assert_series_equal(result, expected)
class TestSeriesArithmetic(object):
# Standard, numeric, or otherwise not-Timestamp/Timedelta/Period dtypes
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[Timestamp('2011-01-01'), Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_series_radd_str_invalid(self, dtype, data):
ser = Series(data, dtype=dtype)
with pytest.raises(TypeError):
'foo_' + ser
# TODO: parametrize, better name
def test_object_ser_add_invalid(self):
# invalid ops
obj_ser = tm.makeObjectSeries()
obj_ser.name = 'objects'
with pytest.raises(Exception):
obj_ser + 1
with pytest.raises(Exception):
obj_ser + np.array(1, dtype=np.int64)
with pytest.raises(Exception):
obj_ser - 1
with pytest.raises(Exception):
obj_ser - np.array(1, dtype=np.int64)
@pytest.mark.parametrize('dtype', [None, object])
def test_series_with_dtype_radd_nan(self, dtype):
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
result = np.nan + ser
tm.assert_series_equal(result, expected)
result = ser + np.nan
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_series_with_dtype_radd_int(self, dtype):
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([2, 3, 4], dtype=dtype)
result = 1 + ser
tm.assert_series_equal(result, expected)
result = ser + 1
tm.assert_series_equal(result, expected)
def test_series_radd_str(self):
ser = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa']))
@pytest.mark.parametrize('dtype', [None, object])
def test_series_with_dtype_radd_timedelta(self, dtype):
# note this test is _not_ aimed at timedelta64-dtyped Series
ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
result = pd.Timedelta('3 days') + ser
tm.assert_series_equal(result, expected)
result = ser + pd.Timedelta('3 days')
tm.assert_series_equal(result, expected)
class TestPeriodSeriesArithmetic(object):
def test_ops_series_timedelta(self):
# GH 13043
ser = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
assert ser.dtype == object
expected = pd.Series([pd.Period('2015-01-02', freq='D'),
pd.Period('2015-01-03', freq='D')], name='xxx')
result = ser + pd.Timedelta('1 days')
tm.assert_series_equal(result, expected)
result = pd.Timedelta('1 days') + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH 13043
ser = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
assert ser.dtype == object
per = pd.Period('2015-01-10', freq='D')
off = per.freq
# dtype will be object because of original dtype
expected = pd.Series([9 * off, 8 * off], name='xxx', dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
pd.Period('2015-01-04', freq='D')], name='xxx')
assert s2.dtype == object
expected = pd.Series([4 * off, 2 * off], name='xxx', dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
class TestTimestampSeriesArithmetic(object):
def test_timestamp_sub_series(self):
ser = pd.Series(pd.date_range('2014-03-17', periods=2, freq='D',
tz='US/Eastern'))
ts = ser[0]
delta_series = pd.Series([np.timedelta64(0, 'D'),
np.timedelta64(1, 'D')])
tm.assert_series_equal(ser - ts, delta_series)
tm.assert_series_equal(ts - ser, -delta_series)
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == 'timedelta64[ns]'
class TestTimedeltaSeriesAdditionSubtraction(object):
# Tests for Series[timedelta64[ns]] __add__, __sub__, __radd__, __rsub__
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64series_add_int_series_invalid(self, tdser):
with pytest.raises(TypeError):
tdser + Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds')
def test_td64series_radd_int_series_invalid(self, tdser):
with pytest.raises(TypeError):
Series([2, 3, 4]) + tdser
def test_td64series_sub_int_series_invalid(self, tdser):
with pytest.raises(TypeError):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds')
def test_td64series_rsub_int_series_invalid(self, tdser):
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
def test_td64_series_add_intlike(self):
# GH#19123
tdi = pd.TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = Series(tdi)
other = Series([20, 30, 40], dtype='uint8')
pytest.raises(TypeError, ser.__add__, 1)
pytest.raises(TypeError, ser.__sub__, 1)
pytest.raises(TypeError, ser.__add__, other)
pytest.raises(TypeError, ser.__sub__, other)
pytest.raises(TypeError, ser.__add__, other.values)
pytest.raises(TypeError, ser.__sub__, other.values)
pytest.raises(TypeError, ser.__add__, pd.Index(other))
pytest.raises(TypeError, ser.__sub__, pd.Index(other))
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser):
with pytest.raises(TypeError):
tdser + scalar
with pytest.raises(TypeError):
scalar + tdser
with pytest.raises(TypeError):
tdser - scalar
with pytest.raises(TypeError):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
pytest.param(Series([1, 2, 3]),
marks=pytest.mark.xfail(reason='GH#19123 integer '
'interpreted as nanos'))
])
def test_td64series_add_sub_numeric_array_invalid(self, vector,
dtype, tdser):
vector = vector.astype(dtype)
with pytest.raises(TypeError):
tdser + vector
with pytest.raises(TypeError):
vector + tdser
with pytest.raises(TypeError):
tdser - vector
with pytest.raises(TypeError):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
def test_td64series_add_sub_timestamp(self):
# GH#11925
tdser = Series(pd.timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(pd.date_range('2012-01-02', periods=3))
tm.assert_series_equal(ts + tdser, expected)
tm.assert_series_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31', periods=3, freq='-1D'))
tm.assert_series_equal(ts - tdser, expected2)
tm.assert_series_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64_series_with_tdi(self, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
result = tdi + ser
tm.assert_series_equal(result, expected)
assert result.dtype == 'timedelta64[ns]'
result = ser + tdi
tm.assert_series_equal(result, expected)
assert result.dtype == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
result = tdi - ser
tm.assert_series_equal(result, expected)
assert result.dtype == 'timedelta64[ns]'
result = ser - tdi
tm.assert_series_equal(result, -expected)
assert result.dtype == 'timedelta64[ns]'
def test_td64_sub_NaT(self):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
res = ser - NaT
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
tm.assert_series_equal(res, expected)
class TestTimedeltaSeriesMultiplicationDivision(object):
# Tests for Series[timedelta64[ns]]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_timedelta_floordiv(self, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
result = td1 // scalar_td
expected = Series([0, 0, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_timedelta_rfloordiv(self, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
result = scalar_td // td1
expected = Series([1, 1, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_timedelta_rfloordiv_explicit(self, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
expected = Series([1, 1, np.nan])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64series_div_numeric_array(self, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
result = tdser / vector
tm.assert_series_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64series_mul_numeric_array(self, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
result = tdser * vector
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [
np.array([20, 30, 40]),
pytest.param(pd.Index([20, 30, 40]),
marks=pytest.mark.xfail(reason='__mul__ raises '
'instead of returning '
'NotImplemented')),
Series([20, 30, 40])
])
def test_td64series_rmul_numeric_array(self, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
result = vector * tdser
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64series_mul_numeric_scalar(self, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
result = tdser * (-one)
tm.assert_series_equal(result, expected)
result = (-one) * tdser
tm.assert_series_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
result = tdser * (2 * one)
tm.assert_series_equal(result, expected)
result = (2 * one) * tdser
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('two', [
2, 2.0,
pytest.param(np.array(2),
marks=pytest.mark.xfail(reason='GH#19011 is_list_like '
'incorrectly True.')),
pytest.param(np.array(2.0),
marks=pytest.mark.xfail(reason='GH#19011 is_list_like '
'incorrectly True.')),
])
def test_td64series_div_numeric_scalar(self, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
result = tdser / two
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_tdi_mul_int_series(self, names):
# GH#19042
tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
result = ser * tdi
tm.assert_series_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_tdi(self, names):
# GH#19042
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
result = ser.__rdiv__(tdi)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64series_mul_timedeltalike_invalid(self, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
class TestTimedeltaSeriesInvalidArithmeticOps(object):
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64series_pow_invalid(self, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
scalar_td ** td1
with tm.assert_raises_regex(TypeError, pattern):
td1 ** scalar_td
|
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
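    # Each _fit_and_score call above returns a one-element list [test_score]
    # (return_train_score is left at False), so ``scores`` becomes an array of
    # shape (n_splits, 1) and the [:, 0] slice below keeps one score per fold.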
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(score_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
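    # inv_test_indices is the inverse permutation of the concatenated test
    # indices: if test_indices were [2, 0, 1], inv_test_indices would be
    # [1, 2, 0], so indexing the stacked predictions with it below returns
    # them in the original sample order.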
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) is equal to np.arange(n_samples)
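    Examples
    --------
    Two quick illustrations; the expected values follow directly from the
    definition above:
    >>> _check_is_permutation(np.array([2, 0, 1]), 3)
    True
    >>> _check_is_permutation(np.array([0, 0, 1]), 3)
    False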
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
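    Examples
    --------
    A minimal usage sketch; the returned score, permutation scores and
    p-value all depend on the data, the estimator and the permutations:
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>> from sklearn.model_selection import permutation_test_score
    >>> iris = load_iris()
    >>> score, perm_scores, pvalue = permutation_test_score(
    ...     SVC(kernel='linear'), iris.data, iris.target,
    ...     n_permutations=30, random_state=0)  # doctest: +SKIP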
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
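    # The +1 in both numerator and denominator counts the unpermuted score as
    # one of the permutations, so the estimated p-value can never be exactly
    # zero (see the Ojala and Garriga reference in the docstring).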
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
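    Examples
    --------
    A minimal usage sketch; the returned scores depend on the data, the
    estimator and the cross-validation splits:
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> from sklearn.model_selection import learning_curve
    >>> iris = load_iris()
    >>> sizes, train_scores, test_scores = learning_curve(
    ...     DecisionTreeClassifier(), iris.data, iris.target,
    ...     train_sizes=[0.3, 0.6, 1.0], cv=5)  # doctest: +SKIP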
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = cv.split(X, y, groups)
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv_iter)
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train,
test, train_sizes_abs, scorer, verbose)
for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
        out = np.array(out)
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
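    # After the transpose, out has shape (2, n_unique_ticks, n_cv_folds):
    # out[0] holds the training scores and out[1] the test scores returned
    # below.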
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
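    # np.split cuts ``train`` at every requested size; zipping with
    # train_sizes pairs each cumulative size with the newly added chunk of
    # samples, and the trailing [:-1] drops the remainder beyond the largest
    # requested size.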
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
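    Examples
    --------
    A minimal usage sketch; the returned arrays have shape
    (len(param_range), n_cv_folds) and their values depend on the data:
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>> from sklearn.model_selection import validation_curve
    >>> iris = load_iris()
    >>> train_scores, test_scores = validation_curve(
    ...     SVC(), iris.data, iris.target, param_name="gamma",
    ...     param_range=[0.001, 0.01, 0.1], cv=5)  # doctest: +SKIP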
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
|
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines.
description:
- Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or
allow the module to create these for you. If you choose not to provide a network interface, the resource group
must contain a virtual network with at least one subnet.
- Currently requires an image found in the Azure Marketplace. Use azure_rm_virtualmachineimage_facts module
to discover the publisher, offer, sku and version of a particular image.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine.
required: true
name:
description:
- Name of the virtual machine.
required: true
state:
description:
- Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State 'absent' will remove the virtual machine.
default: present
choices:
- absent
- present
started:
description:
- Use with state 'present' to start the machine. Set to false to have the machine be 'stopped'.
default: true
allocated:
description:
        - Toggle that controls whether the machine is allocated or deallocated; only useful with state='present'.
default: True
restarted:
description:
- Use with state 'present' to restart a running VM.
default: false
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
        - Name assigned internally to the host. On a Linux VM this is the name returned by the `hostname` command.
When creating a virtual machine, short_hostname defaults to name.
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices. Required when creating a VM.
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
default: true
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- "A dictionary describing the Marketplace image used to build the VM. Will contain keys: publisher,
offer, sku and version. NOTE: set image.version to 'latest' to get the most recent version of a given
image."
required: true
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
        default container will be created.
default: vhds
storage_blob_name:
description:
      - Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
aliases:
- storage_blob
managed_disk_type:
description:
- Managed OS disk type
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default:
- Linux
data_disks:
description:
- Describes list of data disks.
required: false
default: null
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
version_added: "2.4"
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
              default container will be created.
default: vhds
version_added: "2.4"
storage_blob_name:
description:
            - Name of the storage blob used to hold the data disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
public_ip_allocation_method:
description:
- If a public IP address is created when creating the VM (because a Network Interface was not provided),
determines if the public IP address remains permanently associated with the Network Interface. If set
to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled.
choices:
- Dynamic
- Static
default:
- Static
aliases:
- public_ip_allocation
open_ports:
description:
- If a network interface is created when creating the VM, a security group will be created as well. For
Linux hosts a rule will be added to the security group allowing inbound TCP connections to the default
SSH port 22, and for Windows hosts ports 3389 and 5986 will be opened. Override the default open ports by
providing a list of ports.
network_interface_names:
description:
- List of existing network interface names to add to the VM. If a network interface name is not provided
when the VM is created, a default network interface will be created. In order for the module to create
a network interface, at least one Virtual Network with one Subnet must exist.
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used, use this parameter to specify the resource group to use.
version_added: "2.4"
virtual_network_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
The new network interface will be assigned to the first virtual network found in the resource group.
Use this parameter to provide a specific virtual network instead.
aliases:
- virtual_network
subnet_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
The new network interface will be assigned to the first subnet found in the virtual network.
Use this parameter to provide a specific subnet instead.
aliases:
- subnet
remove_on_absent:
description:
- When removing a VM using state 'absent', also remove associated resources
- "It can be 'all' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']"
- Any other input will be ignored
default: ['all']
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create VM with defaults
azure_rm_virtualmachine:
resource_group: Testing
name: testvm10
admin_username: chouseknecht
admin_password: <your password here>
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create a VM with managed disk
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_D4
managed_disk_type: Standard_LRS
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
- name: Create a VM with existing storage account and NIC
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
vm_size: Standard_D4
storage_account: testaccount001
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
network_interfaces: testvm001
image:
offer: CentOS
publisher: OpenLogic
sku: '7.1'
version: latest
- name: Create a VM with OS and multiple data managed disks
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_D4
managed_disk_type: Standard_LRS
admin_username: adminUser
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
managed_disk_type: Standard_LRS
- lun: 1
disk_size_gb: 128
managed_disk_type: Premium_LRS
- name: Create a VM with OS and multiple data storage accounts
azure_rm_virtualmachine:
resource_group: Testing
name: testvm001
vm_size: Standard_DS1_v2
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
network_interfaces: testvm001
storage_container: osdisk
storage_blob: osdisk.vhd
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
storage_container_name: datadisk1
storage_blob_name: datadisk1.vhd
- lun: 1
disk_size_gb: 128
storage_container_name: datadisk2
storage_blob_name: datadisk2.vhd
- name: Power Off
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
started: no
- name: Deallocate
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
allocated: no
- name: Power On
azure_rm_virtualmachine:
    resource_group: Testing
name: testvm002
- name: Restart
azure_rm_virtualmachine:
    resource_group: Testing
name: testvm002
restarted: yes
- name: remove vm and all resources except public ips
azure_rm_virtualmachine:
resource_group: Testing
name: testvm002
state: absent
remove_on_absent:
- network_interfaces
- virtual_storage
'''
RETURN = '''
powerstate:
    description: Indicates whether the state is running, stopped or deallocated.
returned: always
type: string
example: running
deleted_vhd_uris:
description: List of deleted Virtual Hard Disk URIs.
returned: 'on delete'
type: list
example: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"]
deleted_network_interfaces:
description: List of deleted NICs.
returned: 'on delete'
type: list
example: ["testvm1001"]
deleted_public_ips:
description: List of deleted public IP address names.
returned: 'on delete'
type: list
example: ["testvm1001"]
azure_vm:
description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
returned: always
type: complex
contains: {
"properties": {
"hardwareProfile": {
"vmSize": "Standard_D1"
},
"instanceView": {
"disks": [
{
"name": "testvm10.vhd",
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T07:11:16.187272Z"
}
]
}
],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Provisioning succeeded",
"level": "Info",
"time": "2016-03-30T20:33:38.946916Z"
},
{
"code": "PowerState/running",
"displayStatus": "VM running",
"level": "Info"
}
],
"vmAgent": {
"extensionHandlers": [],
"statuses": [
{
"code": "ProvisioningState/succeeded",
"displayStatus": "Ready",
"level": "Info",
"message": "GuestAgent is running and accepting new configurations.",
"time": "2016-03-30T20:31:16.000Z"
}
],
"vmAgentVersion": "WALinuxAgent-2.0.16"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01",
"name": "testvm10_NIC01",
"properties": {
"dnsSettings": {
"appliedDnsServers": [],
"dnsServers": []
},
"enableIPForwarding": false,
"ipConfigurations": [
{
"etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default",
"name": "default",
"properties": {
"privateIPAddress": "10.10.0.5",
"privateIPAllocationMethod": "Dynamic",
"provisioningState": "Succeeded",
"publicIPAddress": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01",
"name": "testvm10_PIP01",
"properties": {
"idleTimeoutInMinutes": 4,
"ipAddress": "13.92.246.197",
"ipConfiguration": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default"
},
"provisioningState": "Succeeded",
"publicIPAllocationMethod": "Static",
"resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42"
}
}
}
}
],
"macAddress": "00-0D-3A-12-AA-14",
"primary": true,
"provisioningState": "Succeeded",
"resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844",
"virtualMachine": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/virtualMachines/testvm10"
}
}
}
]
},
"osProfile": {
"adminUsername": "chouseknecht",
"computerName": "test10",
"linuxConfiguration": {
"disablePasswordAuthentication": false
},
"secrets": []
},
"provisioningState": "Succeeded",
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"name": "datadisk1.vhd",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/datadisk/datadisk1.vhd"
}
}
],
"imageReference": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "7.1.20160308"
},
"osDisk": {
"caching": "ReadOnly",
"createOption": "fromImage",
"name": "testvm10.vhd",
"osType": "Linux",
"vhd": {
"uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd"
}
}
}
},
"type": "Microsoft.Compute/virtualMachines"
}
''' # NOQA
import random
import re
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute.models import NetworkInterfaceReference, \
VirtualMachine, HardwareProfile, \
StorageProfile, OSProfile, OSDisk, DataDisk, \
VirtualHardDisk, ManagedDiskParameters, \
ImageReference, NetworkProfile, LinuxConfiguration, \
SshConfiguration, SshPublicKey, VirtualMachineSizeTypes, \
DiskCreateOptionTypes
from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, NetworkInterface, \
NetworkInterfaceIPConfiguration, Subnet
from azure.mgmt.storage.models import StorageAccountCreateParameters, Sku
from azure.mgmt.storage.models import Kind, SkuTier, SkuName
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
AZURE_OBJECT_CLASS = 'VirtualMachine'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
def extract_names_from_blob_uri(blob_uri, storage_suffix):
# HACK: ditch this once python SDK supports get by URI
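    # For example, with storage_suffix 'core.windows.net', the URI
    # 'https://myaccount.blob.core.windows.net/vhds/testvm10.vhd' parses into
    # {'accountname': 'myaccount', 'containername': 'vhds',
    #  'blobname': 'testvm10.vhd'} (illustrative names only).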
    m = re.match(r'^https://(?P<accountname>[^\.]+)\.blob\.{0}/'
                 r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
if not m:
raise Exception("unable to parse blob uri '%s'" % blob_uri)
extracted_names = m.groupdict()
return extracted_names
class AzureRMVirtualMachine(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str'),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='dict'),
storage_account_name=dict(type='str', aliases=['storage_account']),
storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
storage_blob_name=dict(type='str', aliases=['storage_blob']),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Static',
aliases=['public_ip_allocation']),
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces']),
remove_on_absent=dict(type='list', default=['all']),
            virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
restarted=dict(type='bool', default=False),
started=dict(type='bool', default=True),
data_disks=dict(type='list'),
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.storage_account_name = None
self.storage_container_name = None
self.storage_blob_name = None
self.os_type = None
self.os_disk_caching = None
self.managed_disk_type = None
self.network_interface_names = None
self.remove_on_absent = set()
self.tags = None
self.force = None
self.public_ip_allocation_method = None
self.open_ports = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.subnet_name = None
self.allocated = None
self.restarted = None
self.started = None
self.differences = None
self.data_disks = None
self.results = dict(
changed=False,
actions=[],
powerstate_change=None,
ansible_facts=dict(azure_vm=None)
)
super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
# make sure options are lower case
self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
changed = False
powerstate_change = None
results = dict()
vm = None
network_interfaces = []
requested_vhd_uri = None
data_disk_requested_vhd_uri = None
disable_ssh_password = None
vm_dict = None
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present':
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
if self.network_interface_names:
for name in self.network_interface_names:
nic = self.get_network_interface(name)
network_interfaces.append(nic.id)
if self.ssh_public_keys:
msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
"each dict contains keys: path, key_data."
for key in self.ssh_public_keys:
if not isinstance(key, dict):
self.fail(msg)
if not key.get('path') or not key.get('key_data'):
self.fail(msg)
if self.image:
if not self.image.get('publisher') or not self.image.get('offer') or not self.image.get('sku') \
or not self.image.get('version'):
self.error("parameter error: expecting image to contain publisher, offer, sku and version keys.")
image_version = self.get_image_version()
if self.image['version'] == 'latest':
self.image['version'] = image_version.name
self.log("Using image version {0}".format(self.image['version']))
if not self.storage_blob_name and not self.managed_disk_type:
self.storage_blob_name = self.name + '.vhd'
elif self.managed_disk_type:
self.storage_blob_name = self.name
if self.storage_account_name and not self.managed_disk_type:
self.get_storage_account(self.storage_account_name)
requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(self.storage_account_name,
self._cloud_environment.suffixes.storage_endpoint,
self.storage_container_name,
self.storage_blob_name)
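                    # The resulting URI has the form
                    # https://<account>.blob.<storage endpoint suffix>/<container>/<blob>,
                    # e.g. https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd
                    # in the default public cloud (illustrative name only).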
disable_ssh_password = not self.ssh_password_enabled
try:
self.log("Fetching virtual machine {0}".format(self.name))
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
self.check_provisioning_state(vm, self.state)
vm_dict = self.serialize_vm(vm)
if self.state == 'present':
differences = []
current_nics = []
results = vm_dict
# Try to determine if the VM needs to be updated
if self.network_interface_names:
for nic in vm_dict['properties']['networkProfile']['networkInterfaces']:
current_nics.append(nic['id'])
if set(current_nics) != set(network_interfaces):
self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name))
differences.append('Network Interfaces')
updated_nics = [dict(id=id) for id in network_interfaces]
vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics
changed = True
if self.os_disk_caching and \
self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']:
self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name))
differences.append('OS Disk caching')
changed = True
vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict()))
if update_tags:
differences.append('Tags')
changed = True
if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']:
self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name))
differences.append('Short Hostname')
changed = True
vm_dict['properties']['osProfile']['computerName'] = self.short_hostname
if self.started and vm_dict['powerstate'] not in ['starting', 'running'] and self.allocated:
self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name))
changed = True
powerstate_change = 'poweron'
elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted:
self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'restarted'
elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] not in ['deallocated', 'deallocating']:
self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'"
.format(self.name, vm_dict['powerstate']))
changed = True
powerstate_change = 'deallocated'
elif not self.started and vm_dict['powerstate'] == 'running':
self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name))
changed = True
powerstate_change = 'poweroff'
self.differences = differences
elif self.state == 'absent':
self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name))
results = dict()
changed = True
except CloudError:
self.log('Virtual machine {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: virtual machine {0} does not exist but state is 'present'.".format(self.name))
changed = True
self.results['changed'] = changed
self.results['ansible_facts']['azure_vm'] = results
self.results['powerstate_change'] = powerstate_change
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
default_storage_account = None
if not vm:
# Create the VM
self.log("Create virtual machine {0}".format(self.name))
self.results['actions'].append('Created VM {0}'.format(self.name))
# Validate parameters
if not self.admin_username:
self.fail("Parameter error: admin_username required when creating a virtual machine.")
if self.os_type == 'Linux':
if disable_ssh_password and not self.ssh_public_keys:
self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
if not self.image:
self.fail("Parameter error: an image is required when creating a virtual machine.")
# Get defaults
if not self.network_interface_names:
default_nic = self.create_default_nic()
self.log("network interface:")
self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True)
network_interfaces = [default_nic.id]
# os disk
if not self.storage_account_name and not self.managed_disk_type:
storage_account = self.create_default_storage_account()
self.log("storage account:")
self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True)
requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
self.storage_container_name,
self.storage_blob_name)
default_storage_account = storage_account # store for use by data disks if necessary
if not self.short_hostname:
self.short_hostname = self.name
nics = [NetworkInterfaceReference(id=id) for id in network_interfaces]
# os disk
if not self.managed_disk_type:
managed_disk = None
vhd = VirtualHardDisk(uri=requested_vhd_uri)
else:
vhd = None
managed_disk = ManagedDiskParameters(storage_account_type=self.managed_disk_type)
vm_resource = VirtualMachine(
self.location,
tags=self.tags,
os_profile=OSProfile(
admin_username=self.admin_username,
computer_name=self.short_hostname,
),
hardware_profile=HardwareProfile(
vm_size=self.vm_size
),
storage_profile=StorageProfile(
os_disk=OSDisk(
name=self.storage_blob_name,
vhd=vhd,
managed_disk=managed_disk,
create_option=DiskCreateOptionTypes.from_image,
caching=self.os_disk_caching,
),
image_reference=ImageReference(
publisher=self.image['publisher'],
offer=self.image['offer'],
sku=self.image['sku'],
version=self.image['version'],
),
),
network_profile=NetworkProfile(
network_interfaces=nics
),
)
if self.admin_password:
vm_resource.os_profile.admin_password = self.admin_password
if self.os_type == 'Linux':
vm_resource.os_profile.linux_configuration = LinuxConfiguration(
disable_password_authentication=disable_ssh_password
)
if self.ssh_public_keys:
ssh_config = SshConfiguration()
ssh_config.public_keys = \
[SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
vm_resource.os_profile.linux_configuration.ssh = ssh_config
# data disk
if self.data_disks:
data_disks = []
count = 0
for data_disk in self.data_disks:
if not data_disk.get('managed_disk_type'):
if not data_disk.get('storage_blob_name'):
data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
count += 1
if data_disk.get('storage_account_name'):
data_disk_storage_account = self.get_storage_account(data_disk['storage_account_name'])
else:
if(not default_storage_account):
data_disk_storage_account = self.create_default_storage_account()
self.log("data disk storage account:")
self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
default_storage_account = data_disk_storage_account # store for use by future data disks if necessary
else:
data_disk_storage_account = default_storage_account
if not data_disk.get('storage_container_name'):
data_disk['storage_container_name'] = 'vhds'
data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
data_disk_storage_account.name,
self._cloud_environment.suffixes.storage_endpoint,
data_disk['storage_container_name'],
data_disk['storage_blob_name']
)
if not data_disk.get('managed_disk_type'):
data_disk_managed_disk = None
disk_name = data_disk['storage_blob_name']
data_disk_vhd = VirtualHardDisk(uri=data_disk_requested_vhd_uri)
else:
data_disk_vhd = None
data_disk_managed_disk = ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
disk_name = self.name + "-datadisk-" + str(count)
count += 1
data_disk['caching'] = data_disk.get(
'caching', 'ReadOnly'
)
data_disks.append(DataDisk(
lun=data_disk['lun'],
name=disk_name,
vhd=data_disk_vhd,
caching=data_disk['caching'],
create_option=DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
self.log("Create virtual machine with parameters:")
self.create_or_update_vm(vm_resource)
elif self.differences and len(self.differences) > 0:
# Update the VM based on detected config differences
self.log("Update virtual machine {0}".format(self.name))
self.results['actions'].append('Updated VM {0}'.format(self.name))
nics = [NetworkInterfaceReference(id=interface['id'])
for interface in vm_dict['properties']['networkProfile']['networkInterfaces']]
# os disk
if not vm_dict['properties']['storageProfile']['osDisk'].get('managedDisk'):
managed_disk = None
vhd = VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk']['vhd']['uri'])
else:
vhd = None
managed_disk = ManagedDiskParameters(
storage_account_type=vm_dict['properties']['storageProfile']['osDisk']['managedDisk']['storageAccountType']
)
vm_resource = VirtualMachine(
vm_dict['location'],
os_profile=OSProfile(
admin_username=vm_dict['properties']['osProfile']['adminUsername'],
computer_name=vm_dict['properties']['osProfile']['computerName']
),
hardware_profile=HardwareProfile(
vm_size=vm_dict['properties']['hardwareProfile']['vmSize']
),
storage_profile=StorageProfile(
os_disk=OSDisk(
name=vm_dict['properties']['storageProfile']['osDisk']['name'],
vhd=vhd,
managed_disk=managed_disk,
create_option=vm_dict['properties']['storageProfile']['osDisk']['createOption'],
os_type=vm_dict['properties']['storageProfile']['osDisk']['osType'],
caching=vm_dict['properties']['storageProfile']['osDisk']['caching'],
),
image_reference=ImageReference(
publisher=vm_dict['properties']['storageProfile']['imageReference']['publisher'],
offer=vm_dict['properties']['storageProfile']['imageReference']['offer'],
sku=vm_dict['properties']['storageProfile']['imageReference']['sku'],
version=vm_dict['properties']['storageProfile']['imageReference']['version']
),
),
network_profile=NetworkProfile(
network_interfaces=nics
),
)
if vm_dict.get('tags'):
vm_resource.tags = vm_dict['tags']
# Add admin password, if one provided
if vm_dict['properties']['osProfile'].get('adminPassword'):
vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword']
# Add linux configuration, if applicable
linux_config = vm_dict['properties']['osProfile'].get('linuxConfiguration')
if linux_config:
ssh_config = linux_config.get('ssh', None)
vm_resource.os_profile.linux_configuration = LinuxConfiguration(
disable_password_authentication=linux_config.get('disablePasswordAuthentication', False)
)
if ssh_config:
public_keys = ssh_config.get('publicKeys')
if public_keys:
vm_resource.os_profile.linux_configuration.ssh = SshConfiguration(public_keys=[])
for key in public_keys:
vm_resource.os_profile.linux_configuration.ssh.public_keys.append(
SshPublicKey(path=key['path'], key_data=key['keyData'])
)
# data disk
if vm_dict['properties']['storageProfile'].get('dataDisks'):
data_disks = []
for data_disk in vm_dict['properties']['storageProfile']['dataDisks']:
if data_disk.get('managedDisk'):
managed_disk_type = data_disk['managedDisk']['storageAccountType']
data_disk_managed_disk = ManagedDiskParameters(storage_account_type=managed_disk_type)
data_disk_vhd = None
else:
data_disk_vhd = data_disk['vhd']['uri']
data_disk_managed_disk = None
data_disks.append(DataDisk(
lun=int(data_disk['lun']),
name=data_disk.get('name'),
vhd=data_disk_vhd,
caching=data_disk.get('caching'),
create_option=data_disk.get('createOption'),
disk_size_gb=int(data_disk['diskSizeGB']),
managed_disk=data_disk_managed_disk,
))
vm_resource.storage_profile.data_disks = data_disks
self.log("Update virtual machine with parameters:")
self.create_or_update_vm(vm_resource)
# Make sure we leave the machine in requested power state
if (powerstate_change == 'poweron' and
self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'):
# Attempt to power on the machine
self.power_on_vm()
elif (powerstate_change == 'poweroff' and
self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'):
# Attempt to power off the machine
self.power_off_vm()
elif powerstate_change == 'restarted':
self.restart_vm()
elif powerstate_change == 'deallocated':
self.deallocate_vm()
self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm())
elif self.state == 'absent':
# delete the VM
self.log("Delete virtual machine {0}".format(self.name))
self.results['ansible_facts']['azure_vm'] = None
self.delete_vm(vm)
# until we sort out how we want to do this globally
del self.results['actions']
return self.results
def get_vm(self):
'''
Get the VM with expanded instanceView
:return: VirtualMachine object
'''
try:
vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
return vm
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))
def serialize_vm(self, vm):
'''
Convert a VirtualMachine object to dict.
:param vm: VirtualMachine object
:return: dict
'''
result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
result['id'] = vm.id
result['name'] = vm.name
result['type'] = vm.type
result['location'] = vm.location
result['tags'] = vm.tags
result['powerstate'] = dict()
if vm.instance_view:
result['powerstate'] = next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
# Expand network interfaces to include config properties
for interface in vm.network_profile.network_interfaces:
int_dict = azure_id_to_dict(interface.id)
nic = self.get_network_interface(int_dict['networkInterfaces'])
for interface_dict in result['properties']['networkProfile']['networkInterfaces']:
if interface_dict['id'] == interface.id:
nic_dict = self.serialize_obj(nic, 'NetworkInterface')
interface_dict['name'] = int_dict['networkInterfaces']
interface_dict['properties'] = nic_dict['properties']
# Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'):
pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])
try:
pip = self.network_client.public_ip_addresses.get(self.resource_group,
pipid_dict['publicIPAddresses'])
except Exception as exc:
self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'],
str(exc)))
pip_dict = self.serialize_obj(pip, 'PublicIPAddress')
config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses']
config['properties']['publicIPAddress']['properties'] = pip_dict['properties']
self.log(result, pretty_print=True)
if self.state != 'absent' and not result['powerstate']:
self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name))
return result
def power_off_vm(self):
self.log("Powered off virtual machine {0}".format(self.name))
self.results['actions'].append("Powered off virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.power_off(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def power_on_vm(self):
self.results['actions'].append("Powered on virtual machine {0}".format(self.name))
self.log("Power on virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.start(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def restart_vm(self):
self.results['actions'].append("Restarted virtual machine {0}".format(self.name))
self.log("Restart virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.restart(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def deallocate_vm(self):
self.results['actions'].append("Deallocated virtual machine {0}".format(self.name))
self.log("Deallocate virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.deallocate(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc)))
return True
def delete_vm(self, vm):
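        # Delete the VM and, depending on self.remove_on_absent, any attached
        # VHDs/managed disks, network interfaces and public IPs left behind.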
vhd_uris = []
managed_disk_ids = []
nic_names = []
pip_names = []
if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
            if vm.storage_profile.os_disk.managed_disk:
                self.log('Storing managed disk ID for deletion')
                managed_disk_ids.append(vm.storage_profile.os_disk.managed_disk.id)
            elif vm.storage_profile.os_disk.vhd:
                self.log('Storing VHD URI for deletion')
                vhd_uris.append(vm.storage_profile.os_disk.vhd.uri)
            data_disks = vm.storage_profile.data_disks
            for data_disk in data_disks:
                if data_disk.vhd:
                    vhd_uris.append(data_disk.vhd.uri)
                elif data_disk.managed_disk:
                    managed_disk_ids.append(data_disk.managed_disk.id)
# FUTURE enable diff mode, move these there...
self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris)))
self.results['deleted_vhd_uris'] = vhd_uris
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids
if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
id_dict = azure_id_to_dict(interface.id)
nic_names.append(id_dict['networkInterfaces'])
self.log('NIC names to delete {0}'.format(', '.join(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
if self.remove_on_absent.intersection(set(['all','public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for name in nic_names:
nic = self.get_network_interface(name)
for ipc in nic.ip_configurations:
if ipc.public_ip_address:
pip_dict = azure_id_to_dict(ipc.public_ip_address.id)
pip_names.append(pip_dict['publicIPAddresses'])
self.log('Public IPs to delete are {0}'.format(', '.join(pip_names)))
self.results['deleted_public_ips'] = pip_names
self.log("Deleting virtual machine {0}".format(self.name))
self.results['actions'].append("Deleted virtual machine {0}".format(self.name))
try:
poller = self.compute_client.virtual_machines.delete(self.resource_group, self.name)
# wait for the poller to finish
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc)))
# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)
if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
self.log('Deleting network interfaces')
for name in nic_names:
self.delete_nic(name)
if self.remove_on_absent.intersection(set(['all','public_ips'])):
self.log('Deleting public IPs')
for name in pip_names:
self.delete_pip(name)
return True
def get_network_interface(self, name):
try:
nic = self.network_client.network_interfaces.get(self.resource_group, name)
return nic
except Exception as exc:
self.fail("Error fetching network interface {0} - {1}".format(name, str(exc)))
def delete_nic(self, name):
self.log("Deleting network interface {0}".format(name))
self.results['actions'].append("Deleted network interface {0}".format(name))
try:
poller = self.network_client.network_interfaces.delete(self.resource_group, name)
except Exception as exc:
self.fail("Error deleting network interface {0} - {1}".format(name, str(exc)))
self.get_poller_result(poller)
# Delete doesn't return anything. If we get this far, assume success
return True
def delete_pip(self, name):
self.results['actions'].append("Deleted public IP {0}".format(name))
try:
poller = self.network_client.public_ip_addresses.delete(self.resource_group, name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting {0} - {1}".format(name, str(exc)))
# Delete returns nada. If we get here, assume that all is well.
return True
def delete_managed_disks(self, managed_disk_ids):
for mdi in managed_disk_ids:
try:
poller = self.rm_client.resources.delete_by_id(mdi, '2017-03-30')
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting managed disk {0} - {1}".format(mdi, str(exc)))
def delete_vm_storage(self, vhd_uris):
        # FUTURE: figure out a cloud_env independent way to delete these
for uri in vhd_uris:
self.log("Extracting info from blob uri '{0}'".format(uri))
try:
blob_parts = extract_names_from_blob_uri(uri, self._cloud_environment.suffixes.storage_endpoint)
except Exception as exc:
self.fail("Error parsing blob URI {0}".format(str(exc)))
storage_account_name = blob_parts['accountname']
container_name = blob_parts['containername']
blob_name = blob_parts['blobname']
blob_client = self.get_blob_client(self.resource_group, storage_account_name)
self.log("Delete blob {0}:{1}".format(container_name, blob_name))
self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name))
try:
blob_client.delete_blob(container_name, blob_name)
except Exception as exc:
self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc)))
def get_image_version(self):
try:
versions = self.compute_client.virtual_machine_images.list(self.location,
self.image['publisher'],
self.image['offer'],
self.image['sku'])
except Exception as exc:
self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
str(exc)))
if versions and len(versions) > 0:
if self.image['version'] == 'latest':
return versions[len(versions) - 1]
for version in versions:
if version.name == self.image['version']:
return version
self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
self.image['version']))
def get_storage_account(self, name):
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group,
name)
return account
except Exception as exc:
self.fail("Error fetching storage account {0} - {1}".format(name, str(exc)))
def create_or_update_vm(self, params):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, self.name, params)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
'''
Validate self.vm_size against the list of virtual machine sizes available for the account and location.
:return: boolean
'''
try:
sizes = self.compute_client.virtual_machine_sizes.list(self.location)
except Exception as exc:
self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
for size in sizes:
if size.name == self.vm_size:
return True
return False
def create_default_storage_account(self):
'''
Create a default storage account <vm name>XXXX, where XXXX is a random number. If <vm name>XXXX exists, use it.
Otherwise, create one.
:return: storage account object
'''
account = None
valid_name = False
# Attempt to find a valid storage account name
storage_account_name_base = re.sub('[^a-zA-Z0-9]', '', self.name[:20].lower())
for i in range(0, 5):
rand = random.randrange(1000, 9999)
storage_account_name = storage_account_name_base + str(rand)
if self.check_storage_account_name(storage_account_name):
valid_name = True
break
if not valid_name:
self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name."
.format(self.name))
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name)
except CloudError:
pass
if account:
self.log("Storage account {0} found.".format(storage_account_name))
self.check_provisioning_state(account)
return account
sku = Sku(SkuName.standard_lrs)
        sku.tier = SkuTier.standard
kind = Kind.storage
parameters = StorageAccountCreateParameters(sku, kind, self.location)
self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location))
self.results['actions'].append("Created storage account {0}".format(storage_account_name))
try:
poller = self.storage_client.storage_accounts.create(self.resource_group, storage_account_name, parameters)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc)))
return self.get_storage_account(storage_account_name)
def check_storage_account_name(self, name):
self.log("Checking storage account name availability for {0}".format(name))
try:
response = self.storage_client.storage_accounts.check_name_availability(name)
if response.reason == 'AccountNameInvalid':
raise Exception("Invalid default storage account name: {0}".format(name))
except Exception as exc:
self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc)))
return response.name_available
def create_default_nic(self):
'''
Create a default Network Interface <vm name>01. Requires an existing virtual network
with one subnet. If NIC <vm name>01 exists, use it. Otherwise, create one.
:return: NIC object
'''
network_interface_name = self.name + '01'
nic = None
self.log("Create default NIC {0}".format(network_interface_name))
self.log("Check to see if NIC {0} exists".format(network_interface_name))
try:
nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name)
except CloudError:
pass
if nic:
self.log("NIC {0} found.".format(network_interface_name))
self.check_provisioning_state(nic)
return nic
self.log("NIC {0} does not exist.".format(network_interface_name))
virtual_network_resource_group = None
if self.virtual_network_resource_group:
virtual_network_resource_group = self.virtual_network_resource_group
else:
virtual_network_resource_group = self.resource_group
if self.virtual_network_name:
try:
self.network_client.virtual_networks.list(virtual_network_resource_group, self.virtual_network_name)
virtual_network_name = self.virtual_network_name
except CloudError as exc:
self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc)))
else:
# Find a virtual network
no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(self.resource_group)
virtual_network_name = None
try:
vnets = self.network_client.virtual_networks.list(self.resource_group)
except CloudError:
self.log('cloud error!')
self.fail(no_vnets_msg)
for vnet in vnets:
virtual_network_name = vnet.name
self.log('vnet name: {0}'.format(vnet.name))
break
if not virtual_network_name:
self.fail(no_vnets_msg)
if self.subnet_name:
try:
subnet = self.network_client.subnets.get(self.resource_group, virtual_network_name, self.subnet_name)
subnet_id = subnet.id
except Exception as exc:
self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc)))
else:
no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. A virtual network " \
"with at least one subnet must exist in order to create a NIC for the virtual " \
"machine.".format(virtual_network_name)
subnet_id = None
try:
subnets = self.network_client.subnets.list(virtual_network_resource_group, virtual_network_name)
except CloudError:
self.fail(no_subnets_msg)
for subnet in subnets:
subnet_id = subnet.id
self.log('subnet id: {0}'.format(subnet_id))
break
if not subnet_id:
self.fail(no_subnets_msg)
self.results['actions'].append('Created default public IP {0}'.format(self.name + '01'))
pip = self.create_default_pip(self.resource_group, self.location, self.name, self.public_ip_allocation_method)
self.results['actions'].append('Created default security group {0}'.format(self.name + '01'))
group = self.create_default_securitygroup(self.resource_group, self.location, self.name, self.os_type,
self.open_ports)
parameters = NetworkInterface(
location=self.location,
ip_configurations=[
NetworkInterfaceIPConfiguration(
private_ip_allocation_method='Dynamic',
)
]
)
parameters.ip_configurations[0].subnet = Subnet(id=subnet_id)
parameters.ip_configurations[0].name = 'default'
parameters.network_security_group = NetworkSecurityGroup(id=group.id,
location=group.location,
resource_guid=group.resource_guid)
parameters.ip_configurations[0].public_ip_address = PublicIPAddress(id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
self.log("Creating NIC {0}".format(network_interface_name))
self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True)
self.results['actions'].append("Created NIC {0}".format(network_interface_name))
try:
poller = self.network_client.network_interfaces.create_or_update(self.resource_group,
network_interface_name,
parameters)
new_nic = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc)))
return new_nic
def main():
AzureRMVirtualMachine()
if __name__ == '__main__':
main()
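# Illustrative playbook task (a minimal sketch; the module name is assumed from
# the class name above, and all values are placeholders):
#
#   - name: Create a virtual machine
#     azure_rm_virtualmachine:
#       resource_group: myResourceGroup
#       name: testvm001
#       vm_size: Standard_D1
#       admin_username: azureuser
#       admin_password: "{{ vm_password }}"
#       image:
#         publisher: Canonical
#         offer: UbuntuServer
#         sku: '16.04-LTS'
#         version: latest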
|
|
import re
import string
from termcolor import colored
import parser
def tab_to_space (c, spaces=4):
if c == "\t":
return "".join (list (map (lambda x: " ", range (spaces))))
return c
def escape (text):
    # Renamed the parameter from `string` to `text` so it no longer shadows
    # the imported `string` module.
    def _inner (s):
        if s == "\n":
            return "\\n"
        elif s == "\t":
            return "\\t"
        else:
            return s
    return "".join (map (_inner, text))
def ispunct (c):
    # str.find returns 0 for the first punctuation character ("!") and -1 when
    # the character is not punctuation, so the test must be >= 0, not > 0.
    return string.punctuation.find (c) >= 0
class Keyword ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "keyword"
class Identifier ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "identifier"
class Number ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "number"
class String ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "string"
class Character ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "character"
class Punctuation ():
def __init__ (self, value, line):
self.value = value
self.line = line
self.token_type = "punctuation"
# Keyword lookup table. Membership tests on a dict (KEYWORDS.get(word)) are
# O(1) on average, whereas scanning a plain list would be O(n); a set would
# serve equally well here.
KEYWORDS = { "and": True,
"begin": True,
"do": True,
"else": True,
"end": True,
"false": True,
"for": True,
"function": True,
"if": True,
"in": True,
"nil": True,
"not": True,
"of": True,
"or": True,
"procedure": True,
"program": True,
"record": True,
"then": True,
"true": True,
"type": True,
"var": True,
"while": True, }
N_TWO_OPS = { "==": True,
"!=": True,
":=": True,
"<=": True,
">=": True, }
class Lexer ():
def __init__(self, file=None, char=None, col=None, line=None, current_line=None, toks=None, has_error=None, prev=None, defaults=False):
if prev:
self.file = prev.file
self.char = prev.char
self.col = prev.col
self.line = prev.line
self.current_line = prev.current_line
self.toks = prev.toks
self.has_error = prev.has_error
if file != None: self.file = file
if char != None: self.char = char
if col != None: self.col = col
if line != None: self.line = line
if current_line != None: self.current_line = current_line
if toks != None: self.toks = toks
if has_error != None: self.has_error = has_error
if defaults:
# Still need to specify a file
self.file = file
self.current_line = file.readline ()
self.char = self.current_line[0] if self.current_line != "" else ""
self.col = 0
self.line = 1
self.toks = []
self.has_error = False
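# Build the "   ^~~~" marker line that error() prints under the offending
# source line, pointing at the current column.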
def arrow (lexer, line=""):
def _spaces (l="", c=0):
if c < lexer.col - 1:
return _spaces (l + " ", c + 1)
return l
def _shaft (l="", c=0):
if c < len (lexer.current_line) - lexer.col - 1:
return _shaft (l + "~", c + 1)
return l
return _shaft (_spaces () + "^")
def error (lexer, msg):
line = colored (arrow (lexer), "red")
print ("%s:%i:%i %s\n%s%s" % (colored ("error", "red"), lexer.line, lexer.col - 1, colored (msg, "white", attrs=["bold"]),
lexer.current_line,
line))
return Lexer (has_error=True, prev=lexer)
def peek (lexer):
return lexer.current_line[lexer.col + 1]
def getch (lexer):
if lexer.current_line != "":
if lexer.col + 1 >= len (lexer.current_line):
line = lexer.file.readline ()
return Lexer (current_line=line,
char=line[0] if line != "" else "",
col=0,
line=lexer.line + 1,
prev=lexer)
else:
return Lexer (char=lexer.current_line[lexer.col + 1],
col=lexer.col + 1,
prev=lexer)
else:
return Lexer (char="",
prev=lexer)
def lex_sl_comment (lexer):
if lexer.char != "\n" and lexer.char != "":
return lex_sl_comment (getch (lexer))
else:
return lexer
def lex_ml_comment (lexer):
if lexer.char != "}" and lexer.char != "":
return lex_ml_comment (getch (lexer))
else:
return lexer
def lex_operator (lexer):
if ispunct (peek (lexer)):
if N_TWO_OPS.get (lexer.char + peek (lexer)):
return Lexer (toks=lexer.toks + [Punctuation (lexer.char + peek (lexer), lexer.line)],
prev=getch (lexer))
return Lexer (toks=lexer.toks + [Punctuation (lexer.char, lexer.line)],
prev=lexer)
def lex_identifier (lexer, ident=""):
if peek (lexer).isspace () or ispunct (peek (lexer)):
if KEYWORDS.get (ident + lexer.char):
return Lexer (toks=lexer.toks + [Keyword (ident + lexer.char, lexer.line)],
prev=lexer)
return Lexer (toks=lexer.toks + [Identifier (ident + lexer.char, lexer.line)],
prev=lexer)
elif lexer.char.isalpha ():
return lex_identifier (getch (lexer), ident + lexer.char)
else:
return error (lexer, "Character is not valid for identifiers!")
def lex_number (lexer, number=""):
if lexer.char.isnumeric ():
return lex_number (getch (lexer), number + lexer.char)
elif lexer.char == ".":
if number.find (".") == -1:
return lex_number (getch (lexer), number + lexer.char)
else:
return error (lexer, "The number already has a decimal!")
elif lexer.char.isspace () or ispunct (lexer.char):
return Lexer (toks=lexer.toks + [Number (number, lexer.line)],
prev=lexer)
else:
return error (lexer, "Unexpected character!")
def lex_string (lexer, s=""):
if lexer.char != "\"" and lexer.char != "":
return lex_string (getch (lexer), s + escape (lexer.char))
elif lexer.char == "":
return error (lexer, "Unexpected EOF. Expected closing quotation!")
else:
return Lexer (toks=lexer.toks + [String (s, lexer.line)],
prev=lexer)
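# Main dispatch loop: look at the current character, delegate to the matching
# lex_* helper, and recurse until end of input or a lexing error.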
def lexch (lexer):
if lexer.char != "" and not lexer.has_error:
if lexer.char == "/" and lexer.current_line[lexer.col + 1] == "/":
return lexch (getch (lex_sl_comment (lexer)))
elif lexer.char == "{":
return lexch (getch (lex_ml_comment (lexer)))
elif lexer.char == "\"":
return lexch (getch (lex_string (getch (lexer))))
elif ispunct (lexer.char):
return lexch (getch (lex_operator (lexer)))
elif lexer.char.isalpha ():
return lexch (getch (lex_identifier (lexer)))
elif lexer.char.isnumeric ():
return lexch (lex_number (lexer))
elif not lexer.char.isspace ():
return lexch (getch (Lexer (toks=lexer.toks + [Character (lexer.char, lexer.line)],
prev=lexer)))
return lexch (getch (lexer))
return lexer
lexer = Lexer (open ("test.il", "r"), defaults=True)
lexer = lexch (lexer)
my_parser = parser.Parser (toks=lexer.toks, defaults=True)
result = parser.parse (my_parser)
for a in result.ast:
print (a.get ())
|
|
# coding: utf-8
"""
pytorch
Python SDK for PyTorch-Operator # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
            # If no CA certificate file is set, use Mozilla's root certificates (via certifi).
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
                    # Must delete headers['Content-Type'], otherwise the
                    # correct Content-Type generated by urllib3 for multipart
                    # bodies would be overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
                # Pass a `str` body through unchanged to support content
                # types other than JSON when `body` is already provided in
                # serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
            # In Python 3 the response data is bytes; decode it to str.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
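# Illustrative usage (a minimal sketch; assumes the Configuration class that
# ships with the generated SDK, and a placeholder URL):
#
#   config = Configuration()
#   client = RESTClientObject(config)
#   resp = client.GET("https://example.com/api/v1/resources", headers={})
#   print(resp.status, resp.data)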
|
|
from __future__ import unicode_literals
from collections import defaultdict
from django.core import checks
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import signals, FieldDoesNotExist, DO_NOTHING
from django.db.models.base import ModelBase
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.expressions import Col
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_text, python_2_unicode_compatible
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provides a generic relation to any object through content-type/object-id
fields.
"""
def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
""" Check if field named `field_name` in model `model` exists and is
valid content_type field (is a ForeignKey to ContentType). """
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.rel.to != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
if fk is None:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, value._meta.object_name)
)
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRelation(ForeignObject):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(
self, to,
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
)
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
self.for_concrete_model = kwargs.pop("for_concrete_model", True)
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, to_fields=[],
from_fields=[self.object_id_field_name], **kwargs)
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.rel.to
if isinstance(target, ModelBase):
            # GenericForeignKeys are not considered fields, so they do not
            # appear in `target._meta.local_fields`; look them up on
            # `target._meta.virtual_fields` instead.
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
self.model._meta.pk)]
def get_path_info(self):
opts = self.rel.to._meta
target = opts.get_field_by_name(self.object_id_field_name)[0]
return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.rel.to._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Returns the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
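        # Restrict the generated join to rows whose content type matches this
        # field's model, so the generic relation only returns objects of the
        # right type.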
field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(Col(remote_alias, field, field), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.rel.to._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field, for_concrete_model=True):
self.field = field
self.for_concrete_model = for_concrete_model
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=self.for_concrete_model)
join_cols = self.field.get_joining_columns(reverse_join=True)[0]
manager = RelatedManager(
model=rel_model,
instance=instance,
source_col_name=qn(join_cols[0]),
target_col_name=qn(join_cols[1]),
content_type=content_type,
content_type_field_name=self.field.content_type_field_name,
object_id_field_name=self.field.object_id_field_name,
prefetch_cache_name=self.field.attname,
)
return manager
def __set__(self, instance, value):
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.model, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
for obj in value:
manager.add(obj)
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, instance=None, symmetrical=None,
source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None,
prefetch_cache_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.prefetch_cache_name = prefetch_cache_name
self.pk_val = self.instance._get_pk_val()
self.core_filters = {
'%s__pk' % content_type_field_name: content_type.id,
'%s' % object_id_field_name: instance._get_pk_val(),
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__)
return manager_class(
model=self.model,
instance=self.instance,
symmetrical=self.symmetrical,
source_col_name=self.source_col_name,
target_col_name=self.target_col_name,
content_type=self.content_type,
content_type_field_name=self.content_type_field_name,
object_id_field_name=self.object_id_field_name,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs):
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None, related_query_name=None):
super(GenericRel, self).__init__(field=field, to=to, related_name=related_query_name or '+',
limit_choices_to=limit_choices_to, on_delete=DO_NOTHING,
related_query_name=related_query_name)
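# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The descriptor and manager factory above are what back the reverse side of a
# GenericRelation. The models and import paths below are hypothetical: they assume
# a configured Django project with the contenttypes app installed, and the model
# classes would normally live in an installed app's models.py rather than inside a
# function. Depending on the Django version, the generic fields are importable from
# django.contrib.contenttypes.generic or django.contrib.contenttypes.fields.
def _example_generic_relation_usage():  # illustrative only, never called here
    from django.db import models
    from django.contrib.contenttypes.models import ContentType
    from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
    class TaggedItem(models.Model):
        tag = models.SlugField()
        content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
        object_id = models.PositiveIntegerField()
        content_object = GenericForeignKey('content_type', 'object_id')
    class Bookmark(models.Model):
        url = models.URLField()
        tags = GenericRelation(TaggedItem)
    bookmark = Bookmark.objects.create(url='https://example.com')
    # `bookmark.tags` goes through ReverseGenericRelatedObjectsDescriptor.__get__ and
    # returns a GenericRelatedObjectManager bound to this instance:
    bookmark.tags.create(tag='example')          # fills content_type/object_id itself
    bookmark.tags.add(TaggedItem(tag='extra'))   # add() saves the related object
    return list(bookmark.tags.all())             # filtered by the manager's core_filters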
|
|
from alembic.testing.env import clear_staging_env, staging_env
from alembic.testing import assert_raises_message, eq_
from alembic import util
from alembic.testing.fixtures import TestBase
from alembic.testing import mock
from alembic.migration import MigrationStep, HeadMaintainer
class MigrationTest(TestBase):
def up_(self, rev):
return MigrationStep.upgrade_from_script(
self.env.revision_map, rev)
def down_(self, rev):
return MigrationStep.downgrade_from_script(
self.env.revision_map, rev)
def _assert_downgrade(self, destination, source, expected, expected_heads):
revs = self.env._downgrade_revs(destination, source)
eq_(
revs, expected
)
heads = set(util.to_tuple(source, default=()))
head = HeadMaintainer(mock.Mock(), heads)
for rev in revs:
head.update_to_step(rev)
eq_(head.heads, expected_heads)
def _assert_upgrade(self, destination, source, expected, expected_heads):
revs = self.env._upgrade_revs(destination, source)
eq_(
revs, expected
)
heads = set(util.to_tuple(source, default=()))
head = HeadMaintainer(mock.Mock(), heads)
for rev in revs:
head.update_to_step(rev)
eq_(head.heads, expected_heads)
class RevisionPathTest(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a = env.generate_revision(util.rev_id(), '->a')
cls.b = env.generate_revision(util.rev_id(), 'a->b')
cls.c = env.generate_revision(util.rev_id(), 'b->c')
cls.d = env.generate_revision(util.rev_id(), 'c->d')
cls.e = env.generate_revision(util.rev_id(), 'd->e')
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_upgrade_path(self):
self._assert_upgrade(
self.e.revision, self.c.revision,
[
self.up_(self.d),
self.up_(self.e)
],
set([self.e.revision])
)
self._assert_upgrade(
self.c.revision, None,
[
self.up_(self.a),
self.up_(self.b),
self.up_(self.c),
],
set([self.c.revision])
)
def test_relative_upgrade_path(self):
self._assert_upgrade(
"+2", self.a.revision,
[
self.up_(self.b),
self.up_(self.c),
],
set([self.c.revision])
)
self._assert_upgrade(
"+1", self.a.revision,
[
self.up_(self.b)
],
set([self.b.revision])
)
self._assert_upgrade(
"+3", self.b.revision,
[self.up_(self.c), self.up_(self.d), self.up_(self.e)],
set([self.e.revision])
)
self._assert_upgrade(
"%s+2" % self.b.revision, self.a.revision,
[self.up_(self.b), self.up_(self.c), self.up_(self.d)],
set([self.d.revision])
)
self._assert_upgrade(
"%s-2" % self.d.revision, self.a.revision,
[self.up_(self.b)],
set([self.b.revision])
)
def test_invalid_relative_upgrade_path(self):
assert_raises_message(
util.CommandError,
"Relative revision -2 didn't produce 2 migrations",
self.env._upgrade_revs, "-2", self.b.revision
)
assert_raises_message(
util.CommandError,
r"Relative revision \+5 didn't produce 5 migrations",
self.env._upgrade_revs, "+5", self.b.revision
)
def test_downgrade_path(self):
self._assert_downgrade(
self.c.revision, self.e.revision,
[self.down_(self.e), self.down_(self.d)],
set([self.c.revision])
)
self._assert_downgrade(
None, self.c.revision,
[self.down_(self.c), self.down_(self.b), self.down_(self.a)],
set()
)
def test_relative_downgrade_path(self):
self._assert_downgrade(
"-1", self.c.revision,
[self.down_(self.c)],
set([self.b.revision])
)
self._assert_downgrade(
"-3", self.e.revision,
[self.down_(self.e), self.down_(self.d), self.down_(self.c)],
set([self.b.revision])
)
self._assert_downgrade(
"%s+2" % self.a.revision, self.d.revision,
[self.down_(self.d)],
set([self.c.revision])
)
self._assert_downgrade(
"%s-2" % self.c.revision, self.d.revision,
[self.down_(self.d), self.down_(self.c), self.down_(self.b)],
set([self.a.revision])
)
def test_invalid_relative_downgrade_path(self):
assert_raises_message(
util.CommandError,
"Relative revision -5 didn't produce 5 migrations",
self.env._downgrade_revs, "-5", self.b.revision
)
assert_raises_message(
util.CommandError,
r"Relative revision \+2 didn't produce 2 migrations",
self.env._downgrade_revs, "+2", self.b.revision
)
def test_invalid_move_rev_to_none(self):
assert_raises_message(
util.CommandError,
r"Destination %s is not a valid downgrade "
"target from current head\(s\)" % self.b.revision[0:3],
self.env._downgrade_revs, self.b.revision[0:3], None
)
def test_invalid_move_higher_to_lower(self):
assert_raises_message(
util.CommandError,
r"Destination %s is not a valid downgrade "
"target from current head\(s\)" % self.c.revision[0:4],
self.env._downgrade_revs, self.c.revision[0:4], self.b.revision
)
def test_stamp_to_base(self):
revs = self.env._stamp_revs("base", self.d.revision)
eq_(len(revs), 1)
assert revs[0].should_delete_branch
eq_(revs[0].delete_version_num, self.d.revision)
class BranchedPathTest(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a = env.generate_revision(util.rev_id(), '->a')
cls.b = env.generate_revision(util.rev_id(), 'a->b')
cls.c1 = env.generate_revision(
util.rev_id(), 'b->c1',
branch_labels='c1branch',
refresh=True)
cls.d1 = env.generate_revision(util.rev_id(), 'c1->d1')
cls.c2 = env.generate_revision(
util.rev_id(), 'b->c2',
branch_labels='c2branch',
head=cls.b.revision, splice=True)
cls.d2 = env.generate_revision(
util.rev_id(), 'c2->d2',
head=cls.c2.revision)
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_stamp_down_across_multiple_branch_to_branchpoint(self):
heads = [self.d1.revision, self.c2.revision]
revs = self.env._stamp_revs(
self.b.revision, heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# DELETE d1 revision, UPDATE c2 to b
([self.d1.revision], self.c2.revision, self.b.revision)
)
def test_stamp_to_labeled_base_multiple_heads(self):
revs = self.env._stamp_revs(
"c1branch@base", [self.d1.revision, self.c2.revision])
eq_(len(revs), 1)
assert revs[0].should_delete_branch
eq_(revs[0].delete_version_num, self.d1.revision)
def test_stamp_to_labeled_head_multiple_heads(self):
heads = [self.d1.revision, self.c2.revision]
revs = self.env._stamp_revs(
"c2branch@head", heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# the c1branch remains unchanged
([], self.c2.revision, self.d2.revision)
)
def test_upgrade_single_branch(self):
self._assert_upgrade(
self.d1.revision, self.b.revision,
[self.up_(self.c1), self.up_(self.d1)],
set([self.d1.revision])
)
def test_upgrade_multiple_branch(self):
# move from a single head to multiple heads
self._assert_upgrade(
(self.d1.revision, self.d2.revision), self.a.revision,
[self.up_(self.b), self.up_(self.c2), self.up_(self.d2),
self.up_(self.c1), self.up_(self.d1)],
set([self.d1.revision, self.d2.revision])
)
def test_downgrade_multiple_branch(self):
self._assert_downgrade(
self.a.revision, (self.d1.revision, self.d2.revision),
[self.down_(self.d1), self.down_(self.c1), self.down_(self.d2),
self.down_(self.c2), self.down_(self.b)],
set([self.a.revision])
)
def test_relative_upgrade(self):
self._assert_upgrade(
"c2branch@head-1", self.b.revision,
[self.up_(self.c2)],
set([self.c2.revision])
)
def test_relative_downgrade(self):
self._assert_downgrade(
"c2branch@base+2", [self.d2.revision, self.d1.revision],
[self.down_(self.d2), self.down_(self.c2), self.down_(self.d1)],
set([self.c1.revision])
)
class BranchFromMergepointTest(MigrationTest):
"""this is a form that will come up frequently in the
"many independent roots with cross-dependencies" case.
"""
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(util.rev_id(), '->a1')
cls.b1 = env.generate_revision(util.rev_id(), 'a1->b1')
cls.c1 = env.generate_revision(util.rev_id(), 'b1->c1')
cls.a2 = env.generate_revision(
util.rev_id(), '->a2', head=(),
refresh=True)
cls.b2 = env.generate_revision(
util.rev_id(), 'a2->b2', head=cls.a2.revision)
cls.c2 = env.generate_revision(
util.rev_id(), 'b2->c2', head=cls.b2.revision)
# mergepoint between c1, c2
# d1 dependent on c2
cls.d1 = env.generate_revision(
util.rev_id(), 'd1', head=(cls.c1.revision, cls.c2.revision),
refresh=True)
# but then c2 keeps going into d2
cls.d2 = env.generate_revision(
util.rev_id(), 'd2', head=cls.c2.revision,
refresh=True, splice=True)
def test_mergepoint_to_only_one_side_upgrade(self):
self._assert_upgrade(
self.d1.revision, (self.d2.revision, self.b1.revision),
[self.up_(self.c1), self.up_(self.d1)],
set([self.d2.revision, self.d1.revision])
)
def test_mergepoint_to_only_one_side_downgrade(self):
self._assert_downgrade(
self.b1.revision, (self.d2.revision, self.d1.revision),
[self.down_(self.d1), self.down_(self.c1)],
set([self.d2.revision, self.b1.revision])
)
class BranchFrom3WayMergepointTest(MigrationTest):
"""this is a form that will come up frequently in the
"many independent roots with cross-dependencies" case.
"""
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(util.rev_id(), '->a1')
cls.b1 = env.generate_revision(util.rev_id(), 'a1->b1')
cls.c1 = env.generate_revision(util.rev_id(), 'b1->c1')
cls.a2 = env.generate_revision(
util.rev_id(), '->a2', head=(),
refresh=True)
cls.b2 = env.generate_revision(
util.rev_id(), 'a2->b2', head=cls.a2.revision)
cls.c2 = env.generate_revision(
util.rev_id(), 'b2->c2', head=cls.b2.revision)
cls.a3 = env.generate_revision(
util.rev_id(), '->a3', head=(),
refresh=True)
cls.b3 = env.generate_revision(
util.rev_id(), 'a3->b3', head=cls.a3.revision)
cls.c3 = env.generate_revision(
util.rev_id(), 'b3->c3', head=cls.b3.revision)
# mergepoint between c1, c2, c3
# d1 dependent on c2, c3
cls.d1 = env.generate_revision(
util.rev_id(), 'd1', head=(
cls.c1.revision, cls.c2.revision, cls.c3.revision),
refresh=True)
# but then c2 keeps going into d2
cls.d2 = env.generate_revision(
util.rev_id(), 'd2', head=cls.c2.revision,
refresh=True, splice=True)
# c3 keeps going into d3
cls.d3 = env.generate_revision(
util.rev_id(), 'd3', head=cls.c3.revision,
refresh=True, splice=True)
def test_mergepoint_to_only_one_side_upgrade(self):
self._assert_upgrade(
self.d1.revision,
(self.d3.revision, self.d2.revision, self.b1.revision),
[self.up_(self.c1), self.up_(self.d1)],
set([self.d3.revision, self.d2.revision, self.d1.revision])
)
def test_mergepoint_to_only_one_side_downgrade(self):
self._assert_downgrade(
self.b1.revision,
(self.d3.revision, self.d2.revision, self.d1.revision),
[self.down_(self.d1), self.down_(self.c1)],
set([self.d3.revision, self.d2.revision, self.b1.revision])
)
def test_mergepoint_to_two_sides_upgrade(self):
self._assert_upgrade(
self.d1.revision,
(self.d3.revision, self.b2.revision, self.b1.revision),
[self.up_(self.c2), self.up_(self.c1), self.up_(self.d1)],
# this will merge b2 and b1 into d1
set([self.d3.revision, self.d1.revision])
)
# but then! b2 will break out again if we keep going with it
self._assert_upgrade(
self.d2.revision, (self.d3.revision, self.d1.revision),
[self.up_(self.d2)],
set([self.d3.revision, self.d2.revision, self.d1.revision])
)
class TwinMergeTest(MigrationTest):
"""Test #297, where we have two mergepoints from the same set of
originating branches.
"""
@classmethod
def setup_class(cls):
"""
33e21c000cfe -> 178d4e761bbd (head),
2bef33cb3a58, 3904558db1c6, 968330f320d -> 33e21c000cfe (mergepoint)
46c99f866004 -> 18f46b42410d (head),
2bef33cb3a58, 3904558db1c6, 968330f320d -> 46c99f866004 (mergepoint)
f0fa4315825 -> 3904558db1c6 (branchpoint),
--------------------------
A -> B2 (branchpoint),
B1, B2, B3 -> C1 (mergepoint)
B1, B2, B3 -> C2 (mergepoint)
C1 -> D1 (head),
C2 -> D2 (head),
"""
cls.env = env = staging_env()
cls.a = env.generate_revision(
'a', 'a'
)
cls.b1 = env.generate_revision('b1', 'b1',
head=cls.a.revision)
cls.b2 = env.generate_revision('b2', 'b2',
splice=True,
head=cls.a.revision)
cls.b3 = env.generate_revision('b3', 'b3',
splice=True,
head=cls.a.revision)
cls.c1 = env.generate_revision(
'c1', 'c1',
head=(cls.b1.revision, cls.b2.revision, cls.b3.revision))
cls.c2 = env.generate_revision(
'c2', 'c2',
splice=True,
head=(cls.b1.revision, cls.b2.revision, cls.b3.revision))
cls.d1 = env.generate_revision(
'd1', 'd1', head=cls.c1.revision)
cls.d2 = env.generate_revision(
'd2', 'd2', head=cls.c2.revision)
def test_upgrade(self):
head = HeadMaintainer(mock.Mock(), [self.a.revision])
steps = [
(self.up_(self.b3), ('b3',)),
(self.up_(self.b1), ('b1', 'b3',)),
(self.up_(self.b2), ('b1', 'b2', 'b3',)),
(self.up_(self.c2), ('c2',)),
(self.up_(self.d2), ('d2',)),
(self.up_(self.c1), ('c1', 'd2')),
(self.up_(self.d1), ('d1', 'd2')),
]
for step, assert_ in steps:
head.update_to_step(step)
eq_(head.heads, set(assert_))
class NotQuiteTwinMergeTest(MigrationTest):
"""Test a variant of #297.
"""
@classmethod
def setup_class(cls):
"""
A -> B2 (branchpoint),
B1, B2 -> C1 (mergepoint)
B2, B3 -> C2 (mergepoint)
C1 -> D1 (head),
C2 -> D2 (head),
"""
cls.env = env = staging_env()
cls.a = env.generate_revision(
'a', 'a'
)
cls.b1 = env.generate_revision('b1', 'b1',
head=cls.a.revision)
cls.b2 = env.generate_revision('b2', 'b2',
splice=True,
head=cls.a.revision)
cls.b3 = env.generate_revision('b3', 'b3',
splice=True,
head=cls.a.revision)
cls.c1 = env.generate_revision(
'c1', 'c1',
head=(cls.b1.revision, cls.b2.revision))
cls.c2 = env.generate_revision(
'c2', 'c2',
splice=True,
head=(cls.b2.revision, cls.b3.revision))
cls.d1 = env.generate_revision(
'd1', 'd1', head=cls.c1.revision)
cls.d2 = env.generate_revision(
'd2', 'd2', head=cls.c2.revision)
def test_upgrade(self):
head = HeadMaintainer(mock.Mock(), [self.a.revision])
"""
upgrade a -> b2, b2
upgrade a -> b3, b3
upgrade b2, b3 -> c2, c2
upgrade c2 -> d2, d2
upgrade a -> b1, b1
upgrade b1, b2 -> c1, c1
upgrade c1 -> d1, d1
"""
steps = [
(self.up_(self.b2), ('b2',)),
(self.up_(self.b3), ('b2', 'b3',)),
(self.up_(self.c2), ('c2',)),
(self.up_(self.d2), ('d2',)),
(self.up_(self.b1), ('b1', 'd2',)),
(self.up_(self.c1), ('c1', 'd2')),
(self.up_(self.d1), ('d1', 'd2')),
]
for step, assert_ in steps:
head.update_to_step(step)
eq_(head.heads, set(assert_))
class DependsOnBranchTestOne(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(
util.rev_id(), '->a1',
branch_labels=['lib1'])
cls.b1 = env.generate_revision(util.rev_id(), 'a1->b1')
cls.c1 = env.generate_revision(util.rev_id(), 'b1->c1')
cls.a2 = env.generate_revision(util.rev_id(), '->a2', head=())
cls.b2 = env.generate_revision(
util.rev_id(), 'a2->b2', head=cls.a2.revision)
cls.c2 = env.generate_revision(
util.rev_id(), 'b2->c2', head=cls.b2.revision,
depends_on=cls.c1.revision)
cls.d1 = env.generate_revision(
util.rev_id(), 'c1->d1',
head=cls.c1.revision)
cls.e1 = env.generate_revision(
util.rev_id(), 'd1->e1',
head=cls.d1.revision)
cls.f1 = env.generate_revision(
util.rev_id(), 'e1->f1',
head=cls.e1.revision)
def test_downgrade_to_dependency(self):
heads = [self.c2.revision, self.d1.revision]
head = HeadMaintainer(mock.Mock(), heads)
head.update_to_step(self.down_(self.d1))
eq_(head.heads, set([self.c2.revision]))
def test_stamp_across_dependency(self):
heads = [self.e1.revision, self.c2.revision]
head = HeadMaintainer(mock.Mock(), heads)
for step in self.env._stamp_revs(self.b1.revision, heads):
head.update_to_step(step)
eq_(head.heads, set([self.b1.revision]))
class DependsOnBranchTestTwo(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(util.rev_id(), '->a1', head='base')
cls.a2 = env.generate_revision(util.rev_id(), '->a2', head='base')
cls.a3 = env.generate_revision(util.rev_id(), '->a3', head='base')
cls.amerge = env.generate_revision(util.rev_id(), 'amerge', head=[
cls.a1.revision, cls.a2.revision, cls.a3.revision
])
cls.b1 = env.generate_revision(util.rev_id(), '->b1', head='base')
cls.b2 = env.generate_revision(util.rev_id(), '->b2', head='base')
cls.bmerge = env.generate_revision(util.rev_id(), 'bmerge', head=[
cls.b1.revision, cls.b2.revision
])
cls.c1 = env.generate_revision(util.rev_id(), '->c1', head='base')
cls.c2 = env.generate_revision(util.rev_id(), '->c2', head='base')
cls.c3 = env.generate_revision(util.rev_id(), '->c3', head='base')
cls.cmerge = env.generate_revision(util.rev_id(), 'cmerge', head=[
cls.c1.revision, cls.c2.revision, cls.c3.revision
])
cls.d1 = env.generate_revision(
util.rev_id(), 'overmerge',
head="base",
depends_on=[
cls.a3.revision, cls.b2.revision, cls.c1.revision
])
def test_kaboom(self):
# here's the upgrade path:
# ['->c1', '->b2', '->a3', 'overmerge', '->c3', '->c2', 'cmerge',
# '->b1', 'bmerge', '->a2', '->a1', 'amerge'],
heads = [
self.amerge.revision,
self.bmerge.revision, self.cmerge.revision,
self.d1.revision
]
self._assert_downgrade(
self.b2.revision, heads,
[self.down_(self.bmerge), self.down_(self.d1)],
set([
self.amerge.revision, self.b2.revision,
self.b1.revision, self.cmerge.revision])
)
heads = [
self.amerge.revision, self.b2.revision,
self.b1.revision, self.cmerge.revision]
self._assert_downgrade(
"base", heads,
[
self.down_(self.amerge), self.down_(self.a1),
self.down_(self.a2), self.down_(self.a3),
self.down_(self.b2), self.down_(self.b1),
self.down_(self.cmerge), self.down_(self.c1),
self.down_(self.c2), self.down_(self.c3)
],
set([])
)
class DependsOnBranchLabelTest(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(
util.rev_id(), '->a1',
branch_labels=['lib1'])
cls.b1 = env.generate_revision(util.rev_id(), 'a1->b1')
cls.c1 = env.generate_revision(
util.rev_id(), 'b1->c1',
branch_labels=['c1lib'])
cls.a2 = env.generate_revision(util.rev_id(), '->a2', head=())
cls.b2 = env.generate_revision(
util.rev_id(), 'a2->b2', head=cls.a2.revision)
cls.c2 = env.generate_revision(
util.rev_id(), 'b2->c2', head=cls.b2.revision,
depends_on=['c1lib'])
cls.d1 = env.generate_revision(
util.rev_id(), 'c1->d1',
head=cls.c1.revision)
cls.e1 = env.generate_revision(
util.rev_id(), 'd1->e1',
head=cls.d1.revision)
cls.f1 = env.generate_revision(
util.rev_id(), 'e1->f1',
head=cls.e1.revision)
def test_upgrade_path(self):
self._assert_upgrade(
self.c2.revision, self.a2.revision,
[
self.up_(self.a1),
self.up_(self.b1),
self.up_(self.c1),
self.up_(self.b2),
self.up_(self.c2),
],
set([self.c2.revision, ])
)
class ForestTest(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a1 = env.generate_revision(util.rev_id(), '->a1')
cls.b1 = env.generate_revision(util.rev_id(), 'a1->b1')
cls.a2 = env.generate_revision(
util.rev_id(), '->a2', head=(),
refresh=True)
cls.b2 = env.generate_revision(
util.rev_id(), 'a2->b2', head=cls.a2.revision)
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_base_to_heads(self):
eq_(
self.env._upgrade_revs("heads", "base"),
[self.up_(self.a2), self.up_(self.b2),
self.up_(self.a1), self.up_(self.b1)]
)
def test_stamp_to_heads(self):
revs = self.env._stamp_revs("heads", ())
eq_(len(revs), 2)
eq_(
set(r.to_revisions for r in revs),
set([(self.b1.revision,), (self.b2.revision,)])
)
def test_stamp_to_heads_no_moves_needed(self):
revs = self.env._stamp_revs(
"heads", (self.b1.revision, self.b2.revision))
eq_(len(revs), 0)
class MergedPathTest(MigrationTest):
@classmethod
def setup_class(cls):
cls.env = env = staging_env()
cls.a = env.generate_revision(util.rev_id(), '->a')
cls.b = env.generate_revision(util.rev_id(), 'a->b')
cls.c1 = env.generate_revision(util.rev_id(), 'b->c1')
cls.d1 = env.generate_revision(util.rev_id(), 'c1->d1')
cls.c2 = env.generate_revision(
util.rev_id(), 'b->c2',
branch_labels='c2branch',
head=cls.b.revision, splice=True)
cls.d2 = env.generate_revision(
util.rev_id(), 'c2->d2',
head=cls.c2.revision)
cls.e = env.generate_revision(
util.rev_id(), 'merge d1 and d2',
head=(cls.d1.revision, cls.d2.revision)
)
cls.f = env.generate_revision(util.rev_id(), 'e->f')
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_stamp_down_across_merge_point_branch(self):
heads = [self.e.revision]
revs = self.env._stamp_revs(self.c2.revision, heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# no deletes, UPDATE e to c2
([], self.e.revision, self.c2.revision)
)
def test_stamp_down_across_merge_prior_branching(self):
heads = [self.e.revision]
revs = self.env._stamp_revs(self.a.revision, heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# no deletes, UPDATE e to c2
([], self.e.revision, self.a.revision)
)
def test_stamp_up_across_merge_from_single_branch(self):
revs = self.env._stamp_revs(self.e.revision, [self.c2.revision])
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents([self.c2.revision]),
# no deletes, UPDATE e to c2
([], self.c2.revision, self.e.revision)
)
def test_stamp_labled_head_across_merge_from_multiple_branch(self):
# this is testing that filter_for_lineage() checks for
# d1 both in terms of "c2branch" as well as that the "head"
# revision "f" is the head of both d1 and d2
revs = self.env._stamp_revs(
"c2branch@head", [self.d1.revision, self.c2.revision])
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents([self.d1.revision, self.c2.revision]),
# DELETE d1 revision, UPDATE c2 to e
([self.d1.revision], self.c2.revision, self.f.revision)
)
def test_stamp_up_across_merge_from_multiple_branch(self):
heads = [self.d1.revision, self.c2.revision]
revs = self.env._stamp_revs(
self.e.revision, heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# DELETE d1 revision, UPDATE c2 to e
([self.d1.revision], self.c2.revision, self.e.revision)
)
def test_stamp_up_across_merge_prior_branching(self):
heads = [self.b.revision]
revs = self.env._stamp_revs(self.e.revision, heads)
eq_(len(revs), 1)
eq_(
revs[0].merge_branch_idents(heads),
# no deletes, UPDATE e to c2
([], self.b.revision, self.e.revision)
)
def test_upgrade_across_merge_point(self):
eq_(
self.env._upgrade_revs(self.f.revision, self.b.revision),
[
self.up_(self.c2),
self.up_(self.d2),
self.up_(self.c1), # b->c1, create new branch
self.up_(self.d1),
self.up_(self.e), # d1/d2 -> e, merge branches
# (DELETE d2, UPDATE d1->e)
self.up_(self.f)
]
)
def test_downgrade_across_merge_point(self):
eq_(
self.env._downgrade_revs(self.b.revision, self.f.revision),
[
self.down_(self.f),
self.down_(self.e), # e -> d1 and d2, unmerge branches
# (UPDATE e->d1, INSERT d2)
self.down_(self.d1),
self.down_(self.c1),
self.down_(self.d2),
self.down_(self.c2), # c2->b, delete branch
]
)
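# --- Editor's note: hedged illustration, not part of the original test module. ---
# The relative-revision syntax exercised above ("+2", "<rev>-1", "branch@head", ...)
# is the same syntax accepted by Alembic's public command API; the alembic.ini path
# below is hypothetical.
def _example_relative_revision_commands():  # illustrative only, never called here
    from alembic import command
    from alembic.config import Config
    cfg = Config("alembic.ini")            # hypothetical project configuration
    command.upgrade(cfg, "+2")             # apply the next two revisions
    command.downgrade(cfg, "-1")           # step back one revision
    command.upgrade(cfg, "c2branch@head")  # labeled-branch target, as in the tests above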
|
|
# stdlib
from typing import Any
from typing import Dict
# third party
from fastapi import APIRouter
from fastapi import Body
from fastapi import Depends
from fastapi.responses import JSONResponse
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
# syft absolute
from syft.core.node.common.action.exception_action import ExceptionMessage
# syft
from syft.core.node.common.node_service.association_request.association_request_messages import (
DeleteAssociationRequestMessage,
)
from syft.core.node.common.node_service.association_request.association_request_messages import (
GetAssociationRequestMessage,
)
from syft.core.node.common.node_service.association_request.association_request_messages import (
GetAssociationRequestsMessage,
)
from syft.core.node.common.node_service.association_request.association_request_messages import (
ReceiveAssociationRequestMessage,
)
from syft.core.node.common.node_service.association_request.association_request_messages import (
RespondAssociationRequestMessage,
)
from syft.core.node.common.node_service.association_request.association_request_messages import (
SendAssociationRequestMessage,
)
# grid absolute
from grid.api.dependencies.current_user import get_current_user
from grid.core.node import node
router = APIRouter()
@router.post("/request", status_code=200, response_class=JSONResponse)
def send_association_request(
target: str,
source: str,
current_user: Any = Depends(get_current_user),
) -> Any:
"""Sends a new association request to the target address
Args:
current_user : Current session.
target: Target address.
source: Source address.
Returns:
resp: JSON structure containing a log message
"""
# Map User Key
user_key = SigningKey(current_user.private_key.encode(), encoder=HexEncoder)
# Build Syft Message
msg = SendAssociationRequestMessage(
address=node.address,
metadata={},
target=target,
source=source,
reply_to=node.address,
).sign(signing_key=user_key)
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
resp = {}
if isinstance(reply, ExceptionMessage):
resp = {"error": reply.exception_msg}
else:
resp = {"message": reply.resp_msg}
return resp
@router.post("/receive", status_code=201, response_class=JSONResponse)
def receive_association_request(
name: str = Body(..., example="Nodes Association Request"),
source: str = Body(..., example="http://<node_address>/api/v1"),
target: str = Body(..., example="http://<target_address>/api/v1"),
) -> Dict[str, str]:
"""Receives a new association request to the sender address
Args:
current_user : Current session.
name: Association request name.
target: Target address.
source: Source address.
Returns:
resp: JSON structure containing a log message
"""
# Build Syft Message
msg = ReceiveAssociationRequestMessage(
address=node.address,
name=name,
source=source,
target=target,
reply_to=node.address,
).sign(signing_key=SigningKey.generate())
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
if isinstance(reply, ExceptionMessage):
return {"error": reply.exception_msg}
else:
return {"message": reply.resp_msg}
@router.post("/reply", status_code=201, response_class=JSONResponse)
def respond_association_request(
source: str,
target: str,
current_user: Any = Depends(get_current_user),
) -> Dict[str, str]:
"""Replies an association request
Args:
current_user : Current session.
name: Association request name.
handshake: Code attached to this association request.
target: Target address.
sender: Sender address.
Returns:
resp: JSON structure containing a log message
"""
# Map User Key
user_key = SigningKey(current_user.private_key.encode(), encoder=HexEncoder)
# Build Syft Message
msg = RespondAssociationRequestMessage(
address=node.address,
target=target,
source=source,
reply_to=node.address,
).sign(signing_key=user_key)
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
if isinstance(reply, ExceptionMessage):
return {"error": reply.exception_msg}
else:
return {"message": reply.resp_msg}
@router.get("", status_code=200, response_class=JSONResponse)
def get_all_association_requests(
current_user: Any = Depends(get_current_user),
) -> Dict[str, Any]:
"""Retrieves all association requests
Args:
current_user : Current session.
Returns:
resp: JSON structure containing registered association requests.
"""
# Map User Key
user_key = SigningKey(current_user.private_key.encode(), encoder=HexEncoder)
# Build Syft Message
msg = GetAssociationRequestsMessage(
address=node.address, reply_to=node.address
).sign(signing_key=user_key)
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
if isinstance(reply, ExceptionMessage):
return {"error": reply.exception_msg}
else:
return reply.content
@router.get("/{association_request_id}", status_code=200, response_class=JSONResponse)
def get_specific_association_route(
association_request_id: int,
current_user: Any = Depends(get_current_user),
) -> Dict[str, Any]:
"""Retrieves specific association
Args:
current_user : Current session.
association_request_id: Association request ID.
Returns:
resp: JSON structure containing specific association request.
"""
# Map User Key
user_key = SigningKey(current_user.private_key.encode(), encoder=HexEncoder)
# Build Syft Message
msg = GetAssociationRequestMessage(
address=node.address,
association_request_id=association_request_id,
reply_to=node.address,
).sign(signing_key=user_key)
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
if isinstance(reply, ExceptionMessage):
return {"error": reply.exception_msg}
else:
return reply.content
@router.delete(
"/{association_request_id}", status_code=200, response_class=JSONResponse
)
def delete_association_route(
association_request_id: int,
current_user: Any = Depends(get_current_user),
) -> Dict[str, str]:
"""Deletes specific association
Args:
current_user : Current session.
association_request_id: Association request ID.
Returns:
resp: JSON structure containing a log message
"""
# Map User Key
user_key = SigningKey(current_user.private_key.encode(), encoder=HexEncoder)
# Build Syft Message
msg = DeleteAssociationRequestMessage(
address=node.address,
association_request_id=association_request_id,
reply_to=node.address,
).sign(signing_key=user_key)
# Process syft message
reply = node.recv_immediate_msg_with_reply(msg=msg).message
# Handle Response types
if isinstance(reply, ExceptionMessage):
return {"error": reply.exception_msg}
else:
return {"message": reply.resp_msg}
|
|
"""Support for Coinbase sensors."""
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from .const import (
API_ACCOUNT_AMOUNT,
API_ACCOUNT_BALANCE,
API_ACCOUNT_CURRENCY,
API_ACCOUNT_ID,
API_ACCOUNT_NAME,
API_ACCOUNT_NATIVE_BALANCE,
API_RATES,
API_RESOURCE_TYPE,
API_TYPE_VAULT,
CONF_CURRENCIES,
CONF_EXCHANGE_RATES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
ATTR_NATIVE_BALANCE = "Balance in native currency"
CURRENCY_ICONS = {
"BTC": "mdi:currency-btc",
"ETH": "mdi:currency-eth",
"EUR": "mdi:currency-eur",
"LTC": "mdi:litecoin",
"USD": "mdi:currency-usd",
}
DEFAULT_COIN_ICON = "mdi:cash"
ATTRIBUTION = "Data provided by coinbase.com"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Coinbase sensor platform."""
instance = hass.data[DOMAIN][config_entry.entry_id]
entities = []
provided_currencies = [
account[API_ACCOUNT_CURRENCY]
for account in instance.accounts
if account[API_RESOURCE_TYPE] != API_TYPE_VAULT
]
desired_currencies = []
if CONF_CURRENCIES in config_entry.options:
desired_currencies = config_entry.options[CONF_CURRENCIES]
exchange_base_currency = instance.exchange_rates[API_ACCOUNT_CURRENCY]
for currency in desired_currencies:
if currency not in provided_currencies:
_LOGGER.warning(
"The currency %s is no longer provided by your account, please check "
"your settings in Coinbase's developer tools",
currency,
)
continue
entities.append(AccountSensor(instance, currency))
if CONF_EXCHANGE_RATES in config_entry.options:
for rate in config_entry.options[CONF_EXCHANGE_RATES]:
entities.append(
ExchangeRateSensor(
instance,
rate,
exchange_base_currency,
)
)
async_add_entities(entities)
class AccountSensor(SensorEntity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, currency):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self._currency = currency
for account in coinbase_data.accounts:
if (
account[API_ACCOUNT_CURRENCY] == currency
and account[API_RESOURCE_TYPE] != API_TYPE_VAULT
):
self._name = f"Coinbase {account[API_ACCOUNT_NAME]}"
self._id = (
f"coinbase-{account[API_ACCOUNT_ID]}-wallet-"
f"{account[API_ACCOUNT_CURRENCY]}"
)
self._state = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
self._unit_of_measurement = account[API_ACCOUNT_CURRENCY]
self._native_balance = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_AMOUNT
]
self._native_currency = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_CURRENCY
]
break
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the Unique ID of the sensor."""
return self._id
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self._unit_of_measurement, DEFAULT_COIN_ICON)
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_NATIVE_BALANCE: f"{self._native_balance} {self._native_currency}",
}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
for account in self._coinbase_data.accounts:
if (
account[API_ACCOUNT_CURRENCY] == self._currency
and account[API_RESOURCE_TYPE] != API_TYPE_VAULT
):
self._state = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
self._native_balance = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_AMOUNT
]
self._native_currency = account[API_ACCOUNT_NATIVE_BALANCE][
API_ACCOUNT_CURRENCY
]
break
class ExchangeRateSensor(SensorEntity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, exchange_currency, exchange_base):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self.currency = exchange_currency
self._name = f"{exchange_currency} Exchange Rate"
self._id = f"coinbase-{coinbase_data.user_id}-xe-{exchange_currency}"
self._state = round(
1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
)
self._unit_of_measurement = exchange_base
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID of the sensor."""
return self._id
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self.currency, DEFAULT_COIN_ICON)
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
self._state = round(
            1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
)
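# --- Editor's note: hedged illustration, not part of the original platform. ---
# ExchangeRateSensor reports the inverse of the Coinbase rate, rounded to two
# decimals, because rates are quoted as "units of currency per one unit of the
# base currency". The payload below is hypothetical.
def _example_exchange_rate_state():  # illustrative only, never called here
    rates = {"BTC": "0.000025"}  # hypothetical API_RATES payload: 1 USD buys 0.000025 BTC
    return round(1 / float(rates["BTC"]), 2)  # -> 40000.0, i.e. 1 BTC ~ 40000 USD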
|
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import inspect
import os
import re
import sys
import types
import collections
if sys.version_info >= (3, 4):
import importlib
else:
import imp
import warnings
import itkConfig
import itkBase
import itkLazy
from itkTypes import itkCType
def registerNoTpl(name, cl):
"""Register a class without template
    It may seem pointless to register classes without templates (and it wasn't
    useful until the SmartPointer template was generated), but those classes
    can be used as template arguments of templated classes.
"""
itkTemplate.__templates__[normalizeName(name)] = cl
def normalizeName(name):
"""Normalize the class name to remove ambiguity
This function removes the white spaces in the name, and also
    removes the pointer declaration "*" (it has no meaning in Python). """
name = name.replace(" ", "")
name = name.replace("*", "")
return name
class itkTemplate(object):
"""This class manages access to available template arguments of a C++ class.
This class is generic and does not give help on the methods available in
the instantiated class. To get help on a specific ITK class, instantiate an
object of that class.
e.g.: median = itk.MedianImageFilter[ImageType, ImageType].New()
help(median)
There are two ways to access types:
1. With a dict interface. The user can manipulate template parameters
similarly to C++, with the exception that the available parameters sets are
chosen at compile time. It is also possible, with the dict interface, to
explore the available parameters sets.
2. With object attributes. The user can easily find the available parameters
    sets by pressing tab in an interpreter like IPython.
"""
__templates__ = collections.OrderedDict()
__class_to_template__ = {}
__named_templates__ = {}
__doxygen_root__ = itkConfig.doxygen_root
def __new__(cls, name):
# Singleton pattern: we only make a single instance of any Template of
# a given name. If we have already made the instance, just return it
# as-is.
if name not in cls.__named_templates__:
new_instance = object.__new__(cls)
new_instance.__name__ = name
new_instance.__template__ = collections.OrderedDict()
cls.__named_templates__[name] = new_instance
return cls.__named_templates__[name]
def __add__(self, paramSetString, cl):
"""Add a new argument set and the resulting class to the template.
paramSetString is the C++ string which defines the parameters set.
cl is the class which corresponds to the couple template-argument set.
"""
# recreate the full name and normalize it to avoid ambiguity
normFullName = normalizeName(
self.__name__ + "<" + paramSetString + ">")
# the full class should not be already registered. If it is, there is a
# problem somewhere so warn the user so he can fix the problem
if normFullName in itkTemplate.__templates__:
message = (
"Template %s\n already defined as %s\n is redefined "
"as %s") % (normFullName, self.__templates__[normFullName], cl)
warnings.warn(message)
# register the class
itkTemplate.__templates__[normFullName] = cl
# __find_param__ will parse the paramSetString and produce a list of
# the same parameters transformed in corresponding python classes.
# we transform this list in tuple to make it usable as key of the dict
param = tuple(self.__find_param__(paramSetString))
# once again, warn the user if the tuple of parameter is already
# defined so he can fix the problem
if param in self.__template__:
message = "Warning: template already defined '%s'" % normFullName
warnings.warn(message)
# and register the parameter tuple
self.__template__[param] = cl
# add in __class_to_template__ dictionary
itkTemplate.__class_to_template__[cl] = (self, param)
# now populate the template
# 2 cases:
# - the template is a SmartPointer. In that case, the attribute name
# will be the full real name of the class without the itk prefix and
# _Pointer suffix
# - the template is not a SmartPointer. In that case, we keep only the
        # end of the real class name, which is a short string describing the
# template arguments (for example IUC2)
if cl.__name__.startswith("itk"):
if cl.__name__.endswith("_Pointer"):
# it's a SmartPointer
attributeName = cl.__name__[len("itk"):-len("_Pointer")]
else:
# it's not a SmartPointer
                # we need to know the size of the name to keep only the suffix
# short name does not contain :: and nested namespace
# itk::Numerics::Sample -> itkSample
shortNameSize = len(re.sub(r':.*:', '', self.__name__))
attributeName = cl.__name__[shortNameSize:]
else:
shortName = re.sub(r':.*:', '', self.__name__)
if not cl.__name__.startswith(shortName):
shortName = re.sub(r'.*::', '', self.__name__)
attributeName = cl.__name__[len(shortName):]
if attributeName[0].isdigit():
# the attribute name can't start with a number
# add a single x before it to build a valid name.
# Adding an underscore would hide the attributeName in IPython
attributeName = "x" + attributeName
# add the attribute to this object
self.__dict__[attributeName] = cl
def __find_param__(self, paramSetString):
"""Find the parameters of the template.
paramSetString is the C++ string which defines the parameters set.
__find_param__ returns a list of itk classes, itkCType, and/or numbers
which correspond to the parameters described in paramSetString.
The parameters MUST have been registered before calling this method,
or __find_param__ will return a string and not the wanted object, and
will display a warning. Registration order is important.
This method is not static only to be able to display the template name
in the warning.
"""
# split the string in a list of parameters
paramStrings = []
inner = 0
part = paramSetString.split(",")
for elt in part:
if inner == 0:
paramStrings.append(elt)
else:
paramStrings[-1] += "," + elt
inner += elt.count("<") - elt.count(">")
# convert all string parameters into classes (if possible)
parameters = []
for param in paramStrings:
            # the parameter needs to be normalized several times below;
# do it once here
param = param.strip()
paramNorm = normalizeName(param)
if paramNorm in itkTemplate.__templates__:
# the parameter is registered.
                # just get the real class from the dictionary
param = itkTemplate.__templates__[paramNorm]
elif itkCType.GetCType(param):
# the parameter is a c type
# just get the itkCtype instance
param = itkCType.GetCType(param)
elif paramNorm.isdigit():
# the parameter is a number
# convert the string to a number !
param = int(param)
elif paramNorm == "true":
param = True
elif paramNorm == "false":
param = False
else:
# unable to convert the parameter
# use it without changes, but display a warning message, to
                # prompt the developer to fix the problem
message = (
"Warning: Unknown parameter '%s' in "
"template '%s'" % (param, self.__name__))
warnings.warn(message)
parameters.append(param)
return parameters
def __getitem__(self, parameters):
"""Return the class which corresponds to the given template parameters.
parameters can be:
- a single parameter (Ex: itk.Index[2])
- a list of elements (Ex: itk.Image[itk.UC, 2])
"""
parameters_type = type(parameters)
        if parameters_type is not tuple and parameters_type is not list:
# parameters is a single element.
# include it in a list to manage the 2 cases in the same way
parameters = [parameters]
cleanParameters = []
for param in parameters:
# In the case of itk class instance, get the class
name = param.__class__.__name__
isclass = inspect.isclass(param)
if not isclass and name[:3] == 'itk' and name != "itkCType":
param = param.__class__
# append the parameter to the list. If it's not a supported type,
# it is not in the dictionary and we will raise an exception below
cleanParameters.append(param)
try:
return(self.__template__[tuple(cleanParameters)])
        except KeyError:
self._LoadModules()
try:
return(self.__template__[tuple(cleanParameters)])
            except KeyError:
raise KeyError(
'itkTemplate : No template %s for the %s class' %
(str(parameters), self.__name__))
def __repr__(self):
return '<itkTemplate %s>' % self.__name__
def __getattr__(self, attr):
"""Support for lazy loading."""
self._LoadModules()
return object.__getattribute__(self, attr)
def _LoadModules(self):
"""Loads all the module that may have not been loaded by the lazy loading system.
If multiple modules use the same object, the lazy loading system is only going to
load the module in which the object belongs. The other modules will be loaded only when necessary.
"""
name=self.__name__.split('::')[-1] # Remove 'itk::' or 'itk::Function::'
modules = itkBase.lazy_attributes[name]
for module in modules:
# find the module's name in sys.modules, or create a new module so named
if sys.version_info >= (3, 4):
this_module = sys.modules.setdefault(module, types.ModuleType(module))
else:
this_module = sys.modules.setdefault(module, imp.new_module(module))
namespace = {}
if not hasattr(this_module, '__templates_loaded'):
itkBase.LoadModule(module, namespace)
def __dir__(self):
"""Returns the list of the attributes available in the current template.
This loads all the modules that might be required by this template first,
and then returns the list of attributes. It is used when dir() is called
or when it tries to autocomplete attribute names.
"""
self._LoadModules()
def get_attrs(obj):
if not hasattr(obj, '__dict__'):
return [] # slots only
if sys.version_info >= (3, 0):
dict_types = (dict, types.MappingProxyType)
else:
dict_types = (dict, types.DictProxyType)
if not isinstance(obj.__dict__, dict_types):
raise TypeError("%s.__dict__ is not a dictionary"
"" % obj.__name__)
return obj.__dict__.keys()
def dir2(obj):
attrs = set()
if not hasattr(obj, '__bases__'):
# obj is an instance
if not hasattr(obj, '__class__'):
# slots
return sorted(get_attrs(obj))
klass = obj.__class__
attrs.update(get_attrs(klass))
else:
# obj is a class
klass = obj
for cls in klass.__bases__:
attrs.update(get_attrs(cls))
attrs.update(dir2(cls))
attrs.update(get_attrs(obj))
return list(attrs)
return dir2(self)
def __call__(self, *args, **kwargs):
"""Create a process object, update with the inputs and
attributes, and return the result.
The syntax is the same as the one used in New().
        UpdateLargestPossibleRegion() is executed and the current output,
or tuple of outputs if there is more than
one, is returned.
For example,
outputImage = itk.MedianImageFilter(inputImage, Radius=(1,2))
"""
filt = self.New(*args, **kwargs)
try:
filt.UpdateLargestPossibleRegion()
if filt.GetNumberOfIndexedOutputs() == 1:
result = filt.GetOutput()
else:
result = tuple([filt.GetOutput(idx) for idx in range(filt.GetNumberOfIndexedOutputs())])
except AttributeError:
result = filt
return result
def New(self, *args, **kwargs):
"""Instantiate the template with a type implied from its input.
Template type specification can be avoided by assuming that the type's
first template argument should have the same type as its primary input.
This is generally true. If it is not true, then specify the types
explicitly.
For example, instead of the explicit type specification::
median = itk.MedianImageFilter[ImageType, ImageType].New()
median.SetInput(reader.GetOutput())
call::
median = itk.MedianImageFilter.New(Input=reader.GetOutput())
or, the shortened::
median = itk.MedianImageFilter.New(reader.GetOutput())
or:
median = itk.MedianImageFilter.New(reader)"""
import itk
keys = self.keys()
cur = itk.auto_pipeline.current
if self.__name__ == "itk::ImageFileReader":
return self._NewImageFileReader(*args, **kwargs)
primary_input_methods = ('Input', 'InputImage', 'Input1')
if len(args) != 0:
# try to find a type suitable for the primary input provided
input_type = output(args[0]).__class__
keys = [k for k in keys if k[0] == input_type]
elif set(primary_input_methods).intersection(kwargs.keys()):
for method in primary_input_methods:
if method in kwargs:
input_type = output(kwargs[method]).__class__
keys = [k for k in keys if k[0] == input_type]
break
elif cur is not None and len(cur) != 0:
# try to find a type suitable for the input provided
input_type = output(cur).__class__
keys = [k for k in keys if k[0] == input_type]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
return self[list(keys)[0]].New(*args, **kwargs)
def _NewImageFileReader(self, *args, **kwargs):
primaryInputMethods = ('FileName',)
inputFileName = ''
if len(args) != 0:
# try to find a type suitable for the primary input provided
inputFileName = args[0]
elif set(primaryInputMethods).intersection(kwargs.keys()):
for method in primaryInputMethods:
if method in kwargs:
inputFileName = kwargs[method]
break
if not inputFileName:
raise RuntimeError("No FileName specified.")
import itk
imageIO = itk.ImageIOFactory.CreateImageIO( inputFileName, itk.ImageIOFactory.ReadMode )
if not imageIO:
raise RuntimeError("No ImageIO is registered to handle the given file.")
componentTypeDic= {"float": itk.F, "double": itk.D,
"unsigned_char": itk.UC, "unsigned_short": itk.US, "unsigned_int": itk.UI,
"unsigned_long": itk.UL, "unsigned_long_long": itk.ULL, "char": itk.SC, "short": itk.SS,
"int": itk.SI, "long": itk.SL, "long_long": itk.SLL}
# Read the metadata from the image file.
imageIO.SetFileName( inputFileName )
imageIO.ReadImageInformation()
dimension = imageIO.GetNumberOfDimensions()
componentAsString = imageIO.GetComponentTypeAsString(imageIO.GetComponentType())
component = componentTypeDic[componentAsString]
pixel = imageIO.GetPixelTypeAsString(imageIO.GetPixelType())
PixelType = itkTemplate._pixelTypeFromIO(pixel, component, dimension)
ImageType = itk.Image[PixelType, dimension]
ReaderType = itk.ImageFileReader[ImageType]
return ReaderType.New(*args, **kwargs)
@staticmethod
def _pixelTypeFromIO(pixel, component, dimension):
import itk
if pixel == 'scalar':
PixelType = component
elif pixel == 'rgb':
PixelType = itk.RGBPixel[component]
elif pixel == 'rgba':
PixelType = itk.RGBAPixel[component]
elif pixel == 'offset':
PixelType = itk.Offset[dimension]
elif pixel == 'vector':
PixelType = itk.Vector[component, dimension]
elif pixel == 'point':
PixelType = itk.Point[component, dimension]
elif pixel == 'covariant_vector':
PixelType = itk.CovariantVector[component, dimension]
elif pixel == 'symmetric_second_rank_tensor':
PixelType = itk.SymmetricSecondRankTensor[component, dimension]
elif pixel == 'diffusion_tensor_3D':
PixelType = itk.DiffusionTensor3D[component]
elif pixel == 'complex':
PixelType = itk.complex[component]
elif pixel == 'fixed_array':
PixelType = itk.FixedArray[component, dimension]
elif pixel == 'matrix':
PixelType = itk.Matrix[component, dimension, dimension]
else:
raise RuntimeError("Unknown pixel type: %s." % pixel)
return PixelType
def keys(self):
return self.__template__.keys()
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
def __iter__(self):
for k in self.keys():
yield k
def __contains__(self, key):
        return key in self.keys()
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __len__(self):
return len(self.keys())
def GetTypes(self):
"""Helper method which prints out the available template parameters."""
print("<itkTemplate %s>" % self.__name__)
print("Options:")
for tp in self.GetTypesAsList():
print(" " + str(tp).replace("(", "[").replace(")", "]"))
def GetTypesAsList(self):
"""Helper method which returns the available template parameters."""
# Make a list of allowed types, and sort them
ctypes = []
classes = []
others = []
for key_tuple in self.__template__:
key = str(key_tuple)
if "itkCType" in key:
ctypes.append(key)
elif "class" in key:
classes.append(key)
else:
others.append(key)
# Sort the lists
ctypes = sorted(ctypes)
classes = sorted(classes)
others = sorted(others)
return ctypes + classes + others
# create a new New function which accepts parameters
def New(self, *args, **kargs):
import itk
itk.set_inputs(self, args, kargs)
# now, try to add observer to display progress
if "auto_progress" in kargs.keys():
if kargs["auto_progress"] in [True, 1]:
callback = itk.terminal_progress_callback
elif kargs["auto_progress"] == 2:
callback = itk.simple_progress_callback
else:
callback = None
elif itkConfig.ProgressCallback:
callback = itkConfig.ProgressCallback
else:
callback = None
if callback and not issubclass(self.__class__, itk.Command):
try:
name = self.__class__.__name__
def progress():
# self and callback are kept referenced with a closure
callback(name, self.GetProgress())
self.AddObserver(itk.ProgressEvent(), progress)
except:
# it seems that something goes wrong...
# as this feature is designed for prototyping, it's not really a
# problem if an object doesn't have progress reporter, so adding
# reporter can silently fail
pass
if itkConfig.NotInPlace and "SetInPlace" in dir(self):
self.SetInPlace(False)
if itk.auto_pipeline.current is not None:
itk.auto_pipeline.current.connect(self)
return self
def output(input):
try:
img = input.GetOutput()
except AttributeError:
img = input
return img
def image(input):
warnings.warn("WrapITK warning: itk.image() is deprecated. "
"Use itk.output() instead.")
return output(input)
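# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The two access styles described in the itkTemplate class docstring. This requires
# an ITK build with Python wrapping; the concrete types below are examples only.
def _example_itkTemplate_usage():  # illustrative only, never called here
    import itk
    ImageType = itk.Image[itk.UC, 2]  # dict-style access via itkTemplate.__getitem__
    same_type = itk.Image.UC2         # attribute-style access registered by __add__
    assert ImageType is same_type
    # Explicit instantiation, as in the class docstring:
    median = itk.MedianImageFilter[ImageType, ImageType].New()
    return median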
|
|
#!/usr/bin/env python
"""This module supplies a function that can generate custom sequences of
optimization passes for arbitrary programs.
This module provides an implementation of a genetic algorithm presented by
Cooper in his paper "Optimizing for Reduced Code Space using Genetic
Algorithms" (published 1999). The algorithm is modified like described by
Almagor in "Finding Effective Compilation Sequences" (published 2004).
The algorithm is used to generate a custom optimization sequence for an
arbitrary application. The resulting sequence is a list of flags that can be
set by the LLVM opt tool. The generated sequence is meant to be a good flag
combination that increases the amount of code that can be detected by Polly.
"""
import random
import multiprocessing
import logging
import benchbuild.experiments.sequences.polly_stats as polly_stats
__author__ = "Christoph Woller"
__credits__ = ["Christoph Woller"]
__maintainer__ = "Christoph Woller"
__email__ = "wollerch@fim.uni-passau.de"
# Default values for the population of chromosomes
MIN_POPULATION_SIZE = 10
DEFAULT_CHROMOSOME_SIZE = 10
DEFAULT_POPULATION_SIZE = 50
DEFAULT_GENE_POOL = ['-basicaa', '-mem2reg']
DEFAULT_GENERATIONS = 50
# Should the program print debug information?
print_out = False
def print_chromosome_list(chromosomes):
"""This helper function just prints a list of chromosomes. """
for chromosome in chromosomes:
print(chromosome)
class Chromosome(object):
"""The class Chromosome is used to represent a single chromosome of a
population. Each chromosome consists of genes and has a fitness value.
The genes are available optimization flags. The fitness value of a
chromosome is the number of regions in the application that are no valid
SCoPs if you call Polly with the flags that are represented by the genes
of the chromosome. Hence, the lower the better.
"""
chromosome_number = 0
def __init__(self, genes, environment):
"""Initializes a new chromosome.
Args:
genes (list[string]): the genes of this chromosome.
environment (string): the name of the environment in which the
chromosome's fitness should be tested. In this case the
environment is the name of the program in which we want to
detect SCoPs.
"""
Chromosome.chromosome_number += 1
self.chromosome_id = Chromosome.chromosome_number
self.genes = genes if genes is not None else []
self.fitness_value = float('inf')
self.environment = environment
def __str__(self):
"""Returns a string representation of this chromosome."""
return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(
self.genes) + '; Fitness: ' + str(self.fitness_value))
def __eq__(self, other):
"""Checks if two chromosomes are equal.
Args:
other (Chromosome): the other chromosome, which should be compared
with this one.
Returns:
boolean: true if the environment and the genes of the chromosomes
are equal; false otherwise.
"""
if self.environment != other.environment:
return False
for i in range(0, len(self.genes)):
if self.genes[i] != other.genes[i]:
return False
return True
def __hash__(self):
"""Returns the hash value of this chromosome."""
return hash(('genes', tuple(self.genes)))
def calculate_fitness_value(self, seq_to_fitness):
"""Calculates the fitness value of this chromosome.
Args:
seq_to_fitness (dict): mapping from sequence to fitness value.
"""
self.fitness_value = seq_to_fitness[str(self.genes)]
class Population(object):
"""This class represents a population of chromosomes.
The chromosomes of a population can reproduce and mutate over
generations. This class provides functionality to simulate the next
generations of the population.
"""
def __init__(self, environment, size=DEFAULT_POPULATION_SIZE,
gene_pool=DEFAULT_GENE_POOL,
chromosome_size=DEFAULT_CHROMOSOME_SIZE):
"""Initializes a new population.
The first generation of chromosomes of the population is created
randomly.
Args:
environment (string): the environment the chromosomes should
live in over the generations.
size (int, optional): the number of chromosomes this population
should consist of.
gene_pool (list[string], optional): a list containing all
available genes.
chromosome_size (int, optional): the number of genes a
chromosome should consist of.
"""
self.gene_pool = gene_pool if gene_pool else DEFAULT_GENE_POOL
self.size = max(size, MIN_POPULATION_SIZE)
self.chromosome_size = max(chromosome_size, 0)
self.generation = 0
self.fittest_chromosome = None
self.chromosomes = []
self.environment = environment
for i in range(self.size):
self.chromosomes.append(
Chromosome(self.__generate_random_gene_sequence(),
self.environment))
def __str__(self):
"""Prints out a string representation of this population."""
result = (
'---> Population - Generation: ' + str(
self.generation) + '<--- \n')
result += 'Fittest Chromosome: \n' + str(self.fittest_chromosome)
for chromosome in self.chromosomes:
result += str(chromosome) + '\n'
return result
def simulate_generations(self, gen=DEFAULT_GENERATIONS):
"""Simulates a certain number of generations.
Args:
            gen (int, optional): the number of generations to simulate.
Returns:
Chromosome: the fittest chromosome of the last generation for the
specified environment.
"""
seq_to_fitness = multiprocessing.Manager().dict()
for i in range(gen):
logging.getLogger().debug(self)
self.simulate_generation(seq_to_fitness)
if i < gen - 1:
self.__delete_duplicates()
return self.fittest_chromosome
def simulate_generation(self, seq_to_fitness):
"""Simulates a single generation change of the population."""
# 1. calculate fitness value of each chromosome.
pool = multiprocessing.Pool()
for chromosome in self.chromosomes:
sequence = chromosome.genes
pool.apply_async(calculate_fitness_value,
args=(sequence, seq_to_fitness, str(sequence),
self.environment))
pool.close()
pool.join()
for chromosome in self.chromosomes:
chromosome.calculate_fitness_value(seq_to_fitness)
        # 2. Sort the chromosomes by fitness value and reverse the list so
        # that the fittest chromosomes (lowest fitness value) end up at the
        # end of the list, where they can be removed cheaply with pop().
self.chromosomes.sort(key=lambda c: c.fitness_value)
self.chromosomes = self.chromosomes[::-1]
# 3. best 10% of chromosomes survive without change.
num_best = len(self.chromosomes) // 10
self.fittest_chromosome = self.chromosomes.pop()
best_chromosomes = [self.fittest_chromosome]
for i in range(num_best - 1):
best_chromosomes.append(self.chromosomes.pop())
# 4. crossover: fill the vacancies in the population with new
# chromosomes. The genes of the new chromosomes are mixtures of the
# genes of two randomly chosen strong chromosomes.
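        # For example, with chromosome_size = 4 and parents (genes shown
        # schematically) c1 = [a, b, c, d] and c2 = [w, x, y, z], the four
        # possible children are [a, b, y, z], [c, d, w, x], [w, x, c, d]
        # and [y, z, a, b].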
new_chromosomes = []
num_of_new = self.size - len(best_chromosomes)
while len(new_chromosomes) < num_of_new:
c1 = random.choice(best_chromosomes)
c2 = random.choice(best_chromosomes)
new_chromosomes.append(Chromosome(
c1.genes[:self.chromosome_size // 2]
+ c2.genes[self.chromosome_size // 2:], self.environment))
if len(new_chromosomes) < num_of_new:
new_chromosomes.append(Chromosome(
c1.genes[self.chromosome_size // 2:]
+ c2.genes[:self.chromosome_size // 2], self.environment))
if len(new_chromosomes) < num_of_new:
new_chromosomes.append(Chromosome(
c2.genes[:self.chromosome_size // 2]
+ c1.genes[self.chromosome_size // 2:], self.environment))
if len(new_chromosomes) < num_of_new:
new_chromosomes.append(Chromosome(
c2.genes[self.chromosome_size // 2:]
+ c1.genes[:self.chromosome_size // 2], self.environment))
        # 5. mutation: perform mutations on the new chromosomes. Each gene of
        # a new chromosome mutates with a probability of 10 percent.
self.__mutate(new_chromosomes, 10, seq_to_fitness)
        # 6. Rejoin all chromosomes.
self.chromosomes = best_chromosomes + new_chromosomes
self.generation += 1
def __generate_random_gene_sequence(self):
"""Generates a random sequence of genes."""
genes = []
for j in range(self.chromosome_size):
genes.append(random.choice(self.gene_pool))
return genes
def __mutate(self, chromosomes, mutation_probability, seq_to_fitness):
"""Performs mutations on chromosomes with a certain probability."""
log = logging.getLogger()
for chromosome in chromosomes:
for i in range(self.chromosome_size):
if random.randint(1, 100) <= mutation_probability:
if print_out:
log.debug(
"---> Mutation in Chromosome " + str(
chromosome.chromosome_id) + " in gene "
+ str(i) + " <---")
chromosome.genes[i] = random.choice(self.gene_pool)
                    sequence = str(chromosome.genes)
num_seq = 0
while sequence in seq_to_fitness and num_seq < len(
self.gene_pool) ** self.chromosome_size:
log.debug(
"----> Sequence has been already used. Mutate! <----")
chromosome.genes[random.randint(0, len(
chromosome.genes) - 1)] = random.choice(self.gene_pool)
                        sequence = str(chromosome.genes)
num_seq += 1
def __delete_duplicates(self):
"""Deletes duplicates in the chromosomes of the population."""
log = logging.getLogger()
log.debug("\n---> Duplicate check <---")
chromosomes = list(set(self.chromosomes))
diff = self.size - len(chromosomes)
if diff > 0:
log.debug("---> Duplicate(s) found! <---")
for i in range(diff):
chromosomes.append(
Chromosome(self.__generate_random_gene_sequence(),
self.environment))
else:
log.debug("---> No duplicates found! <---")
self.chromosomes = chromosomes
def calculate_fitness_value(sequence, seq_to_fitness, key, program):
"""Calculates the fitness value of the provided sequence.
This method calculates the fitness of the sequence by using the number
of regions that are no valid SCoPs if this sequence is used for
preoptimization before Polly's SCoP detection.
Args:
sequence (list[string]): the sequence for that the fitness value should
be calculated.
seq_to_fitness (dict): dictionary that stores calculated fitness
values.
key (string): the key of the provided sequence for the dictionary.
program (string): the name of the application this sequence
should be used for.
"""
if key not in seq_to_fitness:
seq_to_fitness[key] = polly_stats.get_regions_without_scops(sequence,
program)
def generate_custom_sequence(program, pass_space=DEFAULT_GENE_POOL,
debug=False):
"""Generates a custom optimization sequence for a provided application.
Args:
program (string): the name of the application a custom sequence should
be generated for.
pass_space (list[string], optional): list of passes that should be
taken into consideration for the generation of the custom
sequence.
debug (boolean, optional): True if debug information should be printed;
False, otherwise.
Returns:
list[string]: the generated custom optimization sequence. Each element
of the list represents one optimization pass.
"""
global print_out
print_out = debug
population = Population(gene_pool=pass_space, environment=program)
fittest_chromosome = population.simulate_generations()
custom_sequence = fittest_chromosome.genes
return custom_sequence
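# Illustrative invocation sketch: the program name 'matmul' is a placeholder
# and a working benchbuild/Polly setup is assumed so that polly_stats can
# evaluate candidate sequences.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    best_sequence = generate_custom_sequence(
        'matmul', pass_space=['-basicaa', '-mem2reg'], debug=True)
    print(best_sequence)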
|
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
from nltk.compat import python_2_unicode_compatible, unicode_repr
from nltk import jsontags
######################################################################
# Tag Rules
######################################################################
class TagRule(object):
"""
An interface for tag transformations on a tagged corpus, as
performed by tbl taggers. Each transformation finds all tokens
in the corpus that are tagged with a specific original tag and
satisfy a specific condition, and replaces their tags with a
replacement tag. For any given transformation, the original
tag, replacement tag, and condition are fixed. Conditions may
depend on the token under consideration, as well as any other
tokens in the corpus.
Tag rules must be comparable and hashable.
"""
def __init__(self, original_tag, replacement_tag):
self.original_tag = original_tag
"""The tag which this TagRule may cause to be replaced."""
self.replacement_tag = replacement_tag
"""The tag with which this TagRule may replace another tag."""
def apply(self, tokens, positions=None):
"""
Apply this rule at every position in positions where it
applies to the given sentence. I.e., for each position p
in *positions*, if *tokens[p]* is tagged with this rule's
original tag, and satisfies this rule's condition, then set
its tag to be this rule's replacement tag.
:param tokens: The tagged sentence
:type tokens: list(tuple(str, str))
:type positions: list(int)
:param positions: The positions where the transformation is to
be tried. If not specified, try it at all positions.
:return: The indices of tokens whose tags were changed by this
rule.
        :rtype: list(int)
"""
if positions is None:
positions = list(range(len(tokens)))
# Determine the indices at which this rule applies.
change = [i for i in positions if self.applies(tokens, i)]
# Make the changes. Note: this must be done in a separate
# step from finding applicable locations, since we don't want
# the rule to interact with itself.
for i in change:
tokens[i] = (tokens[i][0], self.replacement_tag)
return change
def applies(self, tokens, index):
"""
:return: True if the rule would change the tag of
``tokens[index]``, False otherwise
:rtype: bool
:param tokens: A tagged sentence
:type tokens: list(str)
:param index: The index to check
:type index: int
"""
raise NotImplementedError
# Rules must be comparable and hashable for the algorithm to work
def __eq__(self, other):
raise TypeError("Rules must implement __eq__()")
def __ne__(self, other):
raise TypeError("Rules must implement __ne__()")
def __hash__(self):
raise TypeError("Rules must implement __hash__()")
@python_2_unicode_compatible
@jsontags.register_tag
class Rule(TagRule):
"""
A Rule checks the current corpus position for a certain set of conditions;
if they are all fulfilled, the Rule is triggered, meaning that it
will change tag A to tag B. For other tags than A, nothing happens.
The conditions are parameters to the Rule instance. Each condition is a feature-value pair,
with a set of positions to check for the value of the corresponding feature.
Conceptually, the positions are joined by logical OR, and the feature set by logical AND.
More formally, the Rule is then applicable to the M{n}th token iff:
- The M{n}th token is tagged with the Rule's original tag; and
- For each (Feature(positions), M{value}) tuple:
- The value of Feature of at least one token in {n+p for p in positions}
is M{value}.
"""
json_tag = 'nltk.tbl.Rule'
def __init__(self, templateid, original_tag, replacement_tag, conditions):
"""
Construct a new Rule that changes a token's tag from
C{original_tag} to C{replacement_tag} if all of the properties
specified in C{conditions} hold.
@type templateid: string
@param templateid: the template id (a zero-padded string, '001' etc,
so it will sort nicely)
@type conditions: C{iterable} of C{Feature}
@param conditions: A list of Feature(positions),
each of which specifies that the property (computed by
Feature.extract_property()) of at least one
token in M{n} + p in positions is C{value}.
"""
TagRule.__init__(self, original_tag, replacement_tag)
self._conditions = conditions
self.templateid = templateid
def encode_json_obj(self):
return {
'templateid': self.templateid,
'original': self.original_tag,
'replacement': self.replacement_tag,
'conditions': self._conditions,
}
@classmethod
def decode_json_obj(cls, obj):
return cls(obj['templateid'], obj['original'], obj['replacement'], obj['conditions'])
def applies(self, tokens, index):
# Inherit docs from TagRule
# Does the given token have this Rule's "original tag"?
if tokens[index][1] != self.original_tag:
return False
# Check to make sure that every condition holds.
for (feature, val) in self._conditions:
# Look for *any* token that satisfies the condition.
for pos in feature.positions:
if not (0 <= index + pos < len(tokens)):
continue
if feature.extract_property(tokens, index+pos) == val:
break
else:
# No token satisfied the condition; return false.
return False
# Every condition checked out, so the Rule is applicable.
return True
def __eq__(self, other):
return (self is other or
(other is not None and
other.__class__ == self.__class__ and
self.original_tag == other.original_tag and
self.replacement_tag == other.replacement_tag and
self._conditions == other._conditions))
def __ne__(self, other):
return not (self == other)
def __hash__(self):
# Cache our hash value (justified by profiling.)
try:
return self.__hash
except AttributeError:
self.__hash = hash(repr(self))
return self.__hash
def __repr__(self):
# Cache the repr (justified by profiling -- this is used as
# a sort key when deterministic=True.)
try:
return self.__repr
except AttributeError:
self.__repr = (
"{0}('{1}', {2}, {3}, [{4}])".format(
self.__class__.__name__,
self.templateid,
unicode_repr(self.original_tag),
unicode_repr(self.replacement_tag),
# list(self._conditions) would be simpler but will not generate
# the same Rule.__repr__ in python 2 and 3 and thus break some tests
', '.join("({0},{1})".format(f, unicode_repr(v)) for (f, v) in self._conditions)
)
)
return self.__repr
def __str__(self):
def _condition_to_logic(feature, value):
"""
Return a compact, predicate-logic styled string representation
of the given condition.
"""
return '{0}:{1}@[{2}]'.format(
feature.PROPERTY_NAME,
value,
",".join(str(w) for w in feature.positions)
)
conditions = ' & '.join([_condition_to_logic(f, v) for (f, v) in self._conditions])
s = '{0}->{1} if {2}'.format(
self.original_tag,
self.replacement_tag,
conditions
)
return s
def format(self, fmt):
"""
Return a string representation of this rule.
>>> from nltk.tbl.rule import Rule
>>> from nltk.tag.brill import Pos
>>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')])
r.format("str") == str(r)
True
>>> r.format("str")
'VB->NN if Pos:DT@[-2,-1]'
r.format("repr") == repr(r)
True
>>> r.format("repr")
"Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])"
>>> r.format("verbose")
'VB -> NN if the Pos of words i-2...i-1 is "DT"'
>>> r.format("not_found")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "nltk/tbl/rule.py", line 256, in format
raise ValueError("unknown rule format spec: {0}".format(fmt))
ValueError: unknown rule format spec: not_found
>>>
:param fmt: format specification
:type fmt: str
:return: string representation
:rtype: str
"""
if fmt == "str":
return self.__str__()
elif fmt == "repr":
return self.__repr__()
elif fmt == "verbose":
return self._verbose_format()
else:
raise ValueError("unknown rule format spec: {0}".format(fmt))
def _verbose_format(self):
"""
Return a wordy, human-readable string representation
of the given rule.
Not sure how useful this is.
"""
def condition_to_str(feature, value):
return ('the %s of %s is "%s"' %
(feature.PROPERTY_NAME, range_to_str(feature.positions), value))
def range_to_str(positions):
if len(positions) == 1:
p = positions[0]
if p == 0:
return 'this word'
if p == -1:
return 'the preceding word'
elif p == 1:
return 'the following word'
elif p < 0:
return 'word i-%d' % -p
elif p > 0:
return 'word i+%d' % p
else:
# for complete compatibility with the wordy format of nltk2
mx = max(positions)
mn = min(positions)
if mx - mn == len(positions) - 1:
return 'words i%+d...i%+d' % (mn, mx)
else:
return 'words {%s}' % (",".join("i%+d" % d for d in positions),)
replacement = '%s -> %s' % (self.original_tag, self.replacement_tag)
conditions = (' if ' if self._conditions else "") + ', and '.join(
condition_to_str(f, v) for (f, v) in self._conditions
)
return replacement + conditions
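# Illustrative sketch: Pos is assumed importable from nltk.tag.brill, as in
# the format() doctest above; the sentence and tags are examples only.
if __name__ == '__main__':
    from nltk.tag.brill import Pos
    rule = Rule('001', 'VB', 'NN', [(Pos([-2, -1]), 'DT')])
    sent = [('The', 'DT'), ('fly', 'VB'), ('buzzed', 'VBD')]
    print(rule.apply(sent))  # -> [1]: 'fly' is retagged because 'The' is 'DT'
    print(sent[1])           # -> ('fly', 'NN')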
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as ||U - V||_F, i.e.
# the square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
random_seed=0,
kmeans_plus_plus_num_retries=2):
"""Creates an object for generating KMeans clustering graph.
Args:
inputs: An input tensor or list of input tensors
num_clusters: number of clusters.
initial_clusters: Specifies the clusters used during initialization. Can
be a tensor or numpy array, or a function that generates the clusters.
Can also be "random" to specify that clusters should be chosen randomly
from input data.
distance_metric: distance metric used for clustering.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
random_seed: Seed for PRNG used to initialize seeds.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
"""
self._inputs = inputs if isinstance(inputs, list) else [inputs]
assert num_clusters > 0, num_clusters
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
assert distance_metric in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, ('Unsupported distance metric passed to Kmeans %s' %
str(distance_metric))
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
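        # For example, with inp of shape [N, d] and clusters of shape [k, d],
        # the three terms below have shapes [N, 1], [N, k] and [1, k]
        # respectively, broadcasting to an [N, k] matrix of squared distances.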
squared_distance = (math_ops.reduce_sum(
math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
inp, clusters, transpose_b=True) + array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keep_dims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product which is equivalent to the cosine
distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
      # For unit-normalized vectors x and y, the squared Euclidean distance
      # equals 2 * (1 - x . y), i.e. twice the cosine distance. We use this
      # fact to reuse the nearest_neighbors op and halve its distances below.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp):
(indices,
distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append(
(score, array_ops.squeeze(distances), array_ops.squeeze(indices)))
return zip(*output)
def _init_clusters_random(self):
"""Does random initialization of clusters.
Returns:
Tensor of randomly initialized clusters.
"""
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
# Note that for mini-batch k-means, we should ensure that the batch size of
# data used during initialization is sufficiently large to avoid duplicated
# clusters.
with ops.control_dependencies(
[check_ops.assert_less_equal(self._num_clusters, num_data)]):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_clusters, [-1]),
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
clusters_init = embedding_lookup(
self._inputs, indices, partition_strategy='div')
return clusters_init
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return self._distance_metric == COSINE_DISTANCE and not self._use_mini_batch
def _init_clusters(self):
"""Initialization of clusters.
Returns:
Tuple with following elements:
cluster_centers: a Tensor for storing cluster centers
cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
"""
init = self._initial_clusters
if init == RANDOM_INIT:
clusters_init = self._init_clusters_random()
elif init == KMEANS_PLUS_PLUS_INIT:
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
self._inputs[0], self._num_clusters, self._random_seed,
self._kmeans_plus_plus_num_retries)
elif callable(init):
clusters_init = init(self._inputs, self._num_clusters)
elif not isinstance(init, str):
clusters_init = init
else:
assert False, 'Unsupported init passed to Kmeans %s' % str(init)
if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
clusters_init = clusters_init if clusters_init is not None else []
cluster_centers = variables.Variable(
clusters_init, name='clusters', validate_shape=False)
cluster_counts = (variables.Variable(
array_ops.ones(
[self._num_clusters], dtype=dtypes.int64)) if self._use_mini_batch
else None)
return cluster_centers, cluster_counts
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
cluster_centers_var, total_counts = self._init_clusters()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
training_op = self._mini_batch_training_op(inputs, cluster_idx,
cluster_centers,
cluster_centers_var,
total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return all_scores, cluster_idx, scores, training_op
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
cluster_centers_var, total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor of cluster centers, possibly normalized.
cluster_centers_var: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts):
old_counts = array_ops.gather(total_counts, unique_ids)
with ops.colocate_with(cluster_centers):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(
unique_idx, dtype=total_counts.dtype),
unique_idx,
num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# x += (sum_i(d_i) - k * x) / (n + k).
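        # Worked example: for a cluster with old center x = 2.0, old count
        # n = 3, and newly assigned points d = [4.0, 6.0] (k = 2), the update
        # gives x + ((4.0 + 6.0) - 2 * 2.0) / (3 + 2) = 3.2, i.e. the mean of
        # all five points seen so far.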
# Compute sum_i(d_i), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat(
[
array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
],
0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(cluster_centers_var,
unique_ids,
cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing a full-batch update of k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, self._num_clusters))
with ops.colocate_with(cluster_centers):
new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
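# Minimal usage sketch, assuming a TF 1.x build where
# tensorflow.python.client.session is available; shapes, seed and the number
# of clusters are illustrative values only.
if __name__ == '__main__':
  from tensorflow.python.client import session as session_lib
  points = random_ops.random_uniform([100, 2], seed=1)
  kmeans = KMeans(points, num_clusters=3,
                  initial_clusters=RANDOM_INIT,
                  distance_metric=SQUARED_EUCLIDEAN_DISTANCE)
  all_scores, cluster_idx, scores, train_op = kmeans.training_graph()
  with session_lib.Session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(cluster_idx))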
|
|
from datetime import timedelta
from django.conf import settings
from django.contrib.comments.models import Comment
from django.db import models
from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from moderator.constants import CLASS_CHOICES
from likes.signals import object_liked
import secretballot
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class CannedReply(models.Model):
comment = models.TextField(max_length=COMMENT_MAX_LENGTH)
site = models.ForeignKey(
'sites.Site',
blank=True,
null=True
)
class Meta:
verbose_name_plural = 'Canned replies'
def __unicode__(self):
return self.comment
class ClassifiedComment(models.Model):
comment = models.ForeignKey('comments.Comment')
cls = models.CharField(
'Class',
max_length=64,
choices=CLASS_CHOICES
)
class Meta:
ordering = ['-comment__submit_date', ]
def __unicode__(self):
return self.cls.title()
class CommentReply(models.Model):
user = models.ForeignKey(
'auth.User',
limit_choices_to={'is_staff': True}
)
canned_reply = models.ForeignKey(
'moderator.CannedReply',
help_text='Select a canned reply or otherwise enter '
'a custom comment below.',
blank=True,
null=True,
)
comment = models.TextField(
max_length=COMMENT_MAX_LENGTH,
help_text='Enter a custom comment (only used if no '
'canned reply is selected).',
blank=True,
null=True,
)
replied_to_comments = models.ManyToManyField(
'comments.Comment',
related_name='replied_to_comments_set',
        help_text='Comments to which this reply applies.'
)
reply_comments = models.ManyToManyField(
'comments.Comment',
related_name='reply_comments_set',
blank=True,
null=True,
help_text='Generated reply comments.'
)
class Meta:
verbose_name_plural = 'Comment replies'
def __unicode__(self):
return "%s: %s..." % (
self.user.username,
self.comment[:50]
)
@property
def comment_text(self):
if self.canned_reply:
return self.canned_reply.comment
if self.comment:
return self.comment
# Proxy models for admin display.
class HamComment(Comment):
class Meta:
proxy = True
class ReportedComment(Comment):
class Meta:
proxy = True
class SpamComment(Comment):
class Meta:
proxy = True
class UnsureComment(Comment):
class Meta:
proxy = True
@receiver(pre_delete, sender=CommentReply)
def comment_reply_pre_delete_handler(sender, instance, **kwargs):
"""
Deletes all generated reply comments.
"""
instance.reply_comments.all().delete()
@receiver(m2m_changed, sender=CommentReply.replied_to_comments.through)
def comment_reply_post_create_handler(sender, instance, action, model, pk_set,
using, **kwargs):
if action == 'post_add':
for replied_to_comment in instance.replied_to_comments.all():
moderator_settings = getattr(settings, 'MODERATOR', None)
offset_timedelta = timedelta(seconds=1)
if moderator_settings:
if 'REPLY_BEFORE_COMMENT' in moderator_settings:
if moderator_settings['REPLY_BEFORE_COMMENT']:
offset_timedelta = timedelta(seconds=-1)
created = False
            # We use try/except DoesNotExist instead of get_or_create so that
            # we can set an is_reply_comment attribute on a newly created
            # comment, which allows realtime_comment_classifier below to
            # distinguish between normal comments and reply comments.
try:
comment_obj = Comment.objects.get(
content_type=replied_to_comment.content_type,
object_pk=replied_to_comment.object_pk,
site=replied_to_comment.site,
submit_date=replied_to_comment.submit_date + offset_timedelta,
user=instance.user,
)
except Comment.DoesNotExist:
comment_obj = Comment(
content_type=replied_to_comment.content_type,
object_pk=replied_to_comment.object_pk,
site=replied_to_comment.site,
submit_date=replied_to_comment.submit_date + offset_timedelta,
user=instance.user,
comment=instance.comment_text,
)
comment_obj.is_reply_comment = True
comment_obj.save()
created = True
if not created:
comment_obj.comment = instance.comment_text
comment_obj.save()
if comment_obj not in instance.reply_comments.all():
instance.reply_comments.add(comment_obj)
@receiver(post_save, sender=Comment)
def realtime_comment_classifier(sender, instance, created, **kwargs):
"""
Classifies a comment after it has been created.
    This behaviour is configurable via the REALTIME_CLASSIFICATION key of the
    MODERATOR setting; the default behaviour is to classify (True).
"""
# Only classify if newly created.
if created:
moderator_settings = getattr(settings, 'MODERATOR', None)
if moderator_settings:
if 'REALTIME_CLASSIFICATION' in moderator_settings:
if not moderator_settings['REALTIME_CLASSIFICATION']:
return
# Only classify if not a reply comment.
if not getattr(instance, 'is_reply_comment', False):
from moderator.utils import classify_comment
classify_comment(instance)
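# Illustrative MODERATOR settings sketch (values are examples only); both keys
# referenced above are optional:
#
#   MODERATOR = {
#       'REALTIME_CLASSIFICATION': True,
#       'REPLY_BEFORE_COMMENT': False,
#   }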
@receiver(object_liked)
def flag_reported_comments(instance, request, **kwargs):
if not getattr(instance, 'is_reply_comment', False) and\
isinstance(instance, Comment):
from moderator.tasks import flag_reported_comments_task
flag_reported_comments_task.delay(instance)
# Enable voting on Comments (for negative votes/reporting abuse).
secretballot.enable_voting_on(Comment, 'objects_with_votes')
|
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs SPEC CPU2006.
From the SPEC CPU2006 documentation:
"The SPEC CPU 2006 benchmark is SPEC's next-generation, industry-standardized,
CPU-intensive benchmark suite, stressing a system's processor, memory subsystem
and compiler."
SPEC CPU2006 homepage: http://www.spec.org/cpu2006/
"""
import itertools
import logging
import os
import posixpath
import re
import tarfile
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import stages
FLAGS = flags.FLAGS
_SPECINT_BENCHMARKS = frozenset([
'perlbench', 'bzip2', 'gcc', 'mcf', 'gobmk', 'hmmer', 'sjeng',
'libquantum', 'h264ref', 'omnetpp', 'astar', 'xalancbmk'])
_SPECFP_BENCHMARKS = frozenset([
'bwaves', 'gamess', 'milc', 'zeusmp', 'gromacs', 'cactusADM',
'leslie3d', 'namd', 'dealII', 'soplex', 'povray', 'calculix',
'GemsFDTD', 'tonto', 'lbm', 'wrf', 'sphinx3'])
_SPECCPU_SUBSETS = frozenset(['int', 'fp', 'all'])
flags.DEFINE_enum(
'benchmark_subset', 'int',
_SPECFP_BENCHMARKS | _SPECINT_BENCHMARKS | _SPECCPU_SUBSETS,
'Used by the PKB speccpu2006 benchmark. Specifies a subset of SPEC CPU2006 '
'benchmarks to run.')
flags.DEFINE_string(
'runspec_config', 'linux64-x64-gcc47.cfg',
'Used by the PKB speccpu2006 benchmark. Name of the cfg file to use as the '
'SPEC CPU2006 config file provided to the runspec binary via its --config '
'flag. If the benchmark is run using the cpu2006-1.2.iso file, then the '
'cfg file must be placed in the local PKB data directory and will be '
'copied to the remote machine prior to executing runspec. See README.md '
'for instructions if running with a repackaged cpu2006v1.2.tgz file.')
flags.DEFINE_integer(
'runspec_iterations', 3,
'Used by the PKB speccpu2006 benchmark. The number of benchmark iterations '
'to execute, provided to the runspec binary via its --iterations flag.')
flags.DEFINE_string(
'runspec_define', '',
'Used by the PKB speccpu2006 benchmark. Optional comma-separated list of '
'SYMBOL[=VALUE] preprocessor macros provided to the runspec binary via '
'repeated --define flags. Example: numa,smt,sse=SSE4.2')
flags.DEFINE_boolean(
'runspec_enable_32bit', False,
'Used by the PKB speccpu2006 benchmark. If set, multilib packages will be '
'installed on the remote machine to enable use of 32-bit SPEC CPU2006 '
'binaries. This may be useful when running on memory-constrained instance '
'types (i.e. less than 2 GiB memory/core), where 64-bit execution may be '
'problematic.')
flags.DEFINE_boolean(
'runspec_keep_partial_results', False,
'Used by the PKB speccpu2006 benchmark. If set, the benchmark will report '
'an aggregate score even if some of the SPEC CPU2006 component tests '
'failed with status "NR". Available results will be saved, and PKB samples '
'will be marked with a metadata value of partial=true. If unset, partial '
'failures are treated as errors.')
BENCHMARK_NAME = 'speccpu2006'
BENCHMARK_CONFIG = """
speccpu2006:
description: Runs SPEC CPU2006
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
_BENCHMARK_SPECIFIC_VM_STATE_ATTR = 'speccpu2006_vm_state'
_MOUNT_DIR = 'cpu2006_mnt'
_SPECCPU2006_DIR = 'cpu2006'
_SPECCPU2006_ISO = 'cpu2006-1.2.iso'
_SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
_TAR_REQUIRED_MEMBERS = 'cpu2006', 'cpu2006/bin/runspec'
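# A correctly packaged tar file therefore contains at least the members above
# plus 'cpu2006/config/<runspec_config>' (checked in _CheckTarFile below), e.g.:
#   cpu2006/
#   cpu2006/bin/runspec
#   cpu2006/config/linux64-x64-gcc47.cfg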
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites():
"""Verifies that the required input files are present."""
try:
# Peeking into the tar file is slow. If running in stages, it's
# reasonable to do this only once and assume that the contents of the
# tar file will not change between stages.
_CheckTarFile(FLAGS.runspec_config,
examine_members=stages.PROVISION in FLAGS.run_stage)
except data.ResourceNotFound:
_CheckIsoAndCfgFile(FLAGS.runspec_config)
def _CheckTarFile(runspec_config, examine_members):
"""Searches for the tar file and performs preliminary checks on its format.
Args:
runspec_config: string. User-specified name of the config file that is
expected to be in the tar file.
examine_members: boolean. If True, this function will examine the tar file's
members to verify that certain required members are present.
Raises:
    data.ResourceNotFound: If the tar file cannot be found.
errors.Benchmarks.PrepareException: If the tar file does not contain a
required member.
errors.Config.InvalidValue: If the tar file is found, and runspec_config is
not a valid file name.
"""
tar_file_path = data.ResourcePath(_SPECCPU2006_TAR)
logging.info('Found tar file at %s. Skipping search for %s.', tar_file_path,
_SPECCPU2006_ISO)
if posixpath.basename(runspec_config) != runspec_config:
raise errors.Config.InvalidValue(
'Invalid runspec_config value: {0}{1}When running speccpu2006 with a '
'tar file, runspec_config cannot specify a file in a sub-directory. '
'See README.md for information about running speccpu2006 with a tar '
'file.'.format(runspec_config, os.linesep))
if not examine_members:
return
with tarfile.open(tar_file_path, 'r') as tf:
members = tf.getnames()
cfg_member = 'cpu2006/config/{0}'.format(runspec_config)
required_members = itertools.chain(_TAR_REQUIRED_MEMBERS, [cfg_member])
missing_members = set(required_members).difference(members)
if missing_members:
raise errors.Benchmarks.PrepareException(
'The following files were not found within {tar}:{linesep}{members}'
'{linesep}This is an indication that the tar file is formatted '
'incorrectly. See README.md for information about the expected format '
'of the tar file.'.format(
linesep=os.linesep, tar=tar_file_path,
members=os.linesep.join(sorted(missing_members))))
def _CheckIsoAndCfgFile(runspec_config):
"""Searches for the iso file and cfg file.
Args:
runspec_config: string. Name of the config file to provide to runspec.
Raises:
    data.ResourceNotFound: If one of the required files could not be found.
"""
# Search for the iso.
try:
data.ResourcePath(_SPECCPU2006_ISO)
except data.ResourceNotFound:
logging.error(
'%(iso)s not found. To run the speccpu2006 benchmark, %(iso)s must be '
'in the perfkitbenchmarker/data directory (or one of the specified '
'data directories if the --data_search_paths flag is used). Visit '
'https://www.spec.org/cpu2006/ to learn more about purchasing %(iso)s.',
{'iso': _SPECCPU2006_ISO})
raise
# Search for the cfg.
try:
data.ResourcePath(runspec_config)
except data.ResourceNotFound:
logging.error(
'%s not found. To run the speccpu2006 benchmark, the config file '
'specified by the --runspec_config flag must be in the '
'perfkitbenchmarker/data directory (or one of the specified data '
'directories if the --data_search_paths flag is used). Visit '
'https://www.spec.org/cpu2006/docs/runspec.html#about_config to learn '
'more about config files.', runspec_config)
raise
class _SpecCpu2006SpecificState(object):
"""State specific to this benchmark that must be preserved between PKB stages.
An instance of this class is attached to the VM as an attribute and is
therefore preserved as part of the pickled BenchmarkSpec between PKB stages.
Each attribute represents a possible file or directory that may be created on
the remote machine as part of running the benchmark.
Attributes:
cfg_file_path: Optional string. Path of the cfg file on the remote machine.
iso_file_path: Optional string. Path of the iso file on the remote machine.
mount_dir: Optional string. Path where the iso file is mounted on the
remote machine.
spec_dir: Optional string. Path of a created directory on the remote machine
where the SPEC files are stored.
tar_file_path: Optional string. Path of the tar file on the remote machine.
"""
def __init__(self):
self.cfg_file_path = None
self.iso_file_path = None
self.mount_dir = None
self.spec_dir = None
self.tar_file_path = None
def Prepare(benchmark_spec):
"""Installs SPEC CPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
speccpu_vm_state = _SpecCpu2006SpecificState()
setattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR, speccpu_vm_state)
vm.Install('wget')
vm.Install('build_tools')
vm.Install('fortran')
if FLAGS.runspec_enable_32bit:
vm.Install('multilib')
vm.Install('numactl')
scratch_dir = vm.GetScratchDir()
vm.RemoteCommand('chmod 777 {0}'.format(scratch_dir))
speccpu_vm_state.spec_dir = posixpath.join(scratch_dir, _SPECCPU2006_DIR)
try:
_PrepareWithTarFile(vm, speccpu_vm_state)
except data.ResourceNotFound:
_PrepareWithIsoFile(vm, speccpu_vm_state)
def _PrepareWithTarFile(vm, speccpu_vm_state):
"""Prepares the VM to run using the tar file.
Args:
vm: BaseVirtualMachine. Recipient of the tar file.
speccpu_vm_state: _SpecCpu2006SpecificState. Modified by this function to
reflect any changes to the VM that may need to be cleaned up.
"""
scratch_dir = vm.GetScratchDir()
local_tar_file_path = data.ResourcePath(_SPECCPU2006_TAR)
speccpu_vm_state.tar_file_path = posixpath.join(scratch_dir, _SPECCPU2006_TAR)
vm.PushFile(local_tar_file_path, scratch_dir)
vm.RemoteCommand('cd {dir} && tar xvfz {tar}'.format(dir=scratch_dir,
tar=_SPECCPU2006_TAR))
speccpu_vm_state.cfg_file_path = posixpath.join(
speccpu_vm_state.spec_dir, 'config', FLAGS.runspec_config)
def _PrepareWithIsoFile(vm, speccpu_vm_state):
"""Prepares the VM to run using the iso file.
Copies the iso to the VM, mounts it, and extracts the contents. Copies the
config file to the VM. Runs the SPEC install.sh script on the VM.
Args:
vm: BaseVirtualMachine. Recipient of the iso file.
speccpu_vm_state: _SpecCpu2006SpecificState. Modified by this function to
reflect any changes to the VM that may need to be cleaned up.
"""
scratch_dir = vm.GetScratchDir()
# Make cpu2006 directory on the VM.
vm.RemoteCommand('mkdir {0}'.format(speccpu_vm_state.spec_dir))
# Copy the iso to the VM.
local_iso_file_path = data.ResourcePath(_SPECCPU2006_ISO)
speccpu_vm_state.iso_file_path = posixpath.join(scratch_dir, _SPECCPU2006_ISO)
vm.PushFile(local_iso_file_path, scratch_dir)
# Extract files from the iso to the cpu2006 directory.
speccpu_vm_state.mount_dir = posixpath.join(scratch_dir, _MOUNT_DIR)
vm.RemoteCommand('mkdir {0}'.format(speccpu_vm_state.mount_dir))
vm.RemoteCommand('sudo mount -t iso9660 -o loop {0} {1}'.format(
speccpu_vm_state.iso_file_path, speccpu_vm_state.mount_dir))
vm.RemoteCommand('cp -r {0}/* {1}'.format(speccpu_vm_state.mount_dir,
speccpu_vm_state.spec_dir))
vm.RemoteCommand('chmod -R 777 {0}'.format(speccpu_vm_state.spec_dir))
# Copy the cfg to the VM.
local_cfg_file_path = data.ResourcePath(FLAGS.runspec_config)
cfg_file_name = os.path.basename(local_cfg_file_path)
speccpu_vm_state.cfg_file_path = posixpath.join(
speccpu_vm_state.spec_dir, 'config', cfg_file_name)
vm.PushFile(local_cfg_file_path, speccpu_vm_state.cfg_file_path)
# Run SPEC CPU2006 installation.
install_script_path = posixpath.join(speccpu_vm_state.spec_dir, 'install.sh')
vm.RobustRemoteCommand('yes | {0}'.format(install_script_path))
def _ExtractScore(stdout, vm, keep_partial_results):
"""Extracts the SPEC(int|fp) score from stdout.
Args:
stdout: stdout from running RemoteCommand.
vm: The vm instance where SPEC CPU2006 was run.
keep_partial_results: A boolean indicating whether partial results should
be extracted in the event that not all benchmarks were successfully
run. See the "runspec_keep_partial_results" flag for more info.
Sample input for SPECint:
...
...
=============================================
400.perlbench 9770 417 23.4 *
401.bzip2 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Sample input for SPECfp:
...
...
=============================================
410.bwaves 13590 717 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Returns:
A list of sample.Sample objects.
"""
results = []
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(r'Est. (SPEC.*_base2006)\s*(\S*)')
result_section = []
in_result_section = False
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
# search for begin of result section
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
# search for end of result section
match = re.search(re_end_section, line)
if match:
assert in_result_section
spec_name = str(match.group(1))
try:
spec_score = float(match.group(2))
except ValueError:
# Partial results may get reported as '--' instead of a number.
spec_score = None
in_result_section = False
# remove the final SPEC(int|fp) score, which has only 2 columns.
result_section.pop()
metadata = {'num_cpus': vm.num_cpus,
'runspec_config': FLAGS.runspec_config,
'runspec_iterations': str(FLAGS.runspec_iterations),
'runspec_enable_32bit': str(FLAGS.runspec_enable_32bit),
'runspec_define': FLAGS.runspec_define}
metadata.update(vm.GetMachineTypeDict())
missing_results = []
for benchmark in result_section:
# Skip over failed runs, but count them since they make the overall
# result invalid.
if 'NR' in benchmark:
logging.warning('SPEC CPU2006 missing result: %s', benchmark)
missing_results.append(str(benchmark.split()[0]))
continue
# name, ref_time, time, score, misc
name, _, _, score, _ = benchmark.split()
results.append(sample.Sample(str(name), float(score), '', metadata))
if spec_score is None:
missing_results.append(spec_name)
if missing_results:
if keep_partial_results:
metadata['partial'] = 'true'
metadata['missing_results'] = ','.join(missing_results)
else:
raise errors.Benchmarks.RunError(
'speccpu2006: results missing, see log: ' + ','.join(missing_results))
if spec_score is not None:
results.append(sample.Sample(spec_name, spec_score, '', metadata))
return results
def _ParseOutput(vm, spec_dir):
"""Retrieves the SPEC CPU2006 output from the VM and parses it.
Args:
vm: The vm instance where SPEC CPU2006 was run.
spec_dir: string. Path of the directory on the remote machine where the SPEC
files, including binaries and logs, are located.
Returns:
A list of samples to be published (in the same format as Run() returns).
"""
results = []
log_files = []
# FIXME(liquncheng): Only reference runs generate SPEC scores. The log
# id is hardcoded as 001, which might change with different runspec
# parameters. SPEC CPU2006 will generate different logs for build, test
# run, training run and ref run.
if FLAGS.benchmark_subset in _SPECINT_BENCHMARKS | set(['int', 'all']):
log_files.append('CINT2006.001.ref.txt')
if FLAGS.benchmark_subset in _SPECFP_BENCHMARKS | set(['fp', 'all']):
log_files.append('CFP2006.001.ref.txt')
for log in log_files:
stdout, _ = vm.RemoteCommand('cat %s/result/%s' % (spec_dir, log),
should_log=True)
results.extend(_ExtractScore(
stdout, vm, FLAGS.runspec_keep_partial_results or (
FLAGS.benchmark_subset not in _SPECCPU_SUBSETS)))
return results
def Run(benchmark_spec):
"""Runs SPEC CPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vm = benchmark_spec.vms[0]
speccpu_vm_state = getattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR)
num_cpus = vm.num_cpus
runspec_flags = [
('config', posixpath.basename(speccpu_vm_state.cfg_file_path)),
('tune', 'base'), ('size', 'ref'), ('rate', num_cpus),
('iterations', FLAGS.runspec_iterations)]
if FLAGS.runspec_define:
for runspec_define in FLAGS.runspec_define.split(','):
runspec_flags.append(('define', runspec_define))
runspec_cmd = 'runspec --noreportable {flags} {subset}'.format(
flags=' '.join('--{0}={1}'.format(k, v) for k, v in runspec_flags),
subset=FLAGS.benchmark_subset)
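  # With the default flags on an 8-CPU VM this produces, for example:
  #   runspec --noreportable --config=linux64-x64-gcc47.cfg --tune=base
  #     --size=ref --rate=8 --iterations=3 int
  # (the --rate value follows the VM's CPU count).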
cmd = ' && '.join((
'cd {0}'.format(speccpu_vm_state.spec_dir), '. ./shrc', './bin/relocate',
'. ./shrc', 'rm -rf result', runspec_cmd))
vm.RobustRemoteCommand(cmd)
logging.info('SPEC CPU2006 Results:')
return _ParseOutput(vm, speccpu_vm_state.spec_dir)
def Cleanup(benchmark_spec):
"""Cleans up SPEC CPU2006 from the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
speccpu_vm_state = getattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR, None)
if speccpu_vm_state:
if speccpu_vm_state.mount_dir:
try:
vm.RemoteCommand('sudo umount {0}'.format(speccpu_vm_state.mount_dir))
except errors.VirtualMachine.RemoteCommandError:
# Even if umount failed, continue to clean up.
logging.exception('umount failed.')
targets = ' '.join(p for p in speccpu_vm_state.__dict__.values() if p)
vm.RemoteCommand('rm -rf {0}'.format(targets))
|
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
# Copyright 2019 Open GEE Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supports web interface for cutting a globe based on a specified polygon.
Ajax calls are made for the individual steps so that the user can
get feedback on progress of process.
TODO: May be necessary to add a status call for longer processes on bigger
globes. Can use size of directory as a metric.
"""
import cgi
import os
import re
import shutil
import sys
import time
import ssl
import urllib
import urllib2
from contextlib import closing
import defusedxml.ElementTree as etree
import xml.etree.ElementTree as etree2
from common import form_wrap
from common import postgres_manager_wrap
import common.utils
from core import globe_cutter
import common.configs
CONFIG_FILE = "/opt/google/gehttpd/cgi-bin/advanced_cutter.cfg"
CONFIGS = common.configs.Configs(CONFIG_FILE)
COMMAND_DIR = "/opt/google/bin"
WEB_URL_BASE = "/cutter/globes"
WEB_DIR = "/opt/google/gehttpd/htdocs%s" % WEB_URL_BASE
TEMPLATE_DIR = "/opt/google/gehttpd/htdocs/cutter/template"
MAP_FILE_TEMPLATE = "%s/%%s.glm" % WEB_DIR
GLOBE_FILE_TEMPLATE = "%s/%%s.glb" % WEB_DIR
GLOBE_NAME_TEMPLATE = "%s_%s"
# Default values if no environment variables are set.
DEFAULT_PORTABLE_PORT = "9335"
DEFAULT_PORTABLE_SERVER = "localhost"
DEFAULT_PORTABLE_PREFIX = "http"
DEFAULT_SOURCE_GLOBE = ""
# Should be relative address, so appended to target base
# address (publish point) from which dbRoot was fetched.
DEFAULT_SEARCH_SERVICE = "Portable3dPoiSearch"
# Names of environment variables.
PORTABLE_SERVER_PARAM = "FORCE_PORTABLE_SERVER"
PORTABLE_PORT_PARAM = "FORCE_PORTABLE_PORT"
PORTABLE_PREFIX_PARAM = "FORCE_PORTABLE_PREFIX"
SEARCH_SERVICE_PARAM = "FORCE_SEARCH_SERVICE"
KML_SERVER_PARAM = "FORCE_KML_SERVER"
KML_PORT_PARAM = "FORCE_KML_PORT"
SOURCE_GLOBE_PARAM = "FORCE_SOURCE_GLOBE"
PORTABLE_TMP_PARAM = "FORCE_PORTABLE_TMP"
FORM = form_wrap.FormWrap(cgi.FieldStorage(), do_sanitize=True)
TMP_DIR = FORM.getvalue_path(PORTABLE_TMP_PARAM)
if not TMP_DIR:
TMP_DIR = WEB_DIR
BASE_DIR = "%s/.globe_builder" % TMP_DIR
GLOBE_ENV_DIR_TEMPLATE = "%s/%%s_%%s" % BASE_DIR
GLOBE_FINAL_ENV_DIR_TEMPLATE = "%s/%%s_env" % BASE_DIR
LOG_FILE = "%s/log" % GLOBE_ENV_DIR_TEMPLATE
GLOBE_DIR_TEMPLATE = "%s/%%s" % GLOBE_ENV_DIR_TEMPLATE
ICONS_DIR_TEMPLATE = "%s/%%s/icons" % GLOBE_ENV_DIR_TEMPLATE
PLUGIN_DIR_TEMPLATE = "%s/%%s/earth" % GLOBE_ENV_DIR_TEMPLATE
MAPS_DIR_TEMPLATE = "%s/%%s/maps" % GLOBE_ENV_DIR_TEMPLATE
JSON_EARTH_FILE_TEMPLATE = "%s/%%s/earth/earth.json" % GLOBE_ENV_DIR_TEMPLATE
JSON_MAP_FILE_TEMPLATE = "%s/%%s/maps/map.json" % GLOBE_ENV_DIR_TEMPLATE
INFO_FILE_TEMPLATE = "%s/%%s/earth/info.txt" % GLOBE_ENV_DIR_TEMPLATE
JS_DIR_TEMPLATE = "%s/%%s/js" % GLOBE_ENV_DIR_TEMPLATE
KML_MAP_FILE_TEMPLATE = "%s/kml_map.txt" % GLOBE_ENV_DIR_TEMPLATE
KML_DIR_TEMPLATE = "%s/%%s/kml" % GLOBE_ENV_DIR_TEMPLATE
ICONS_DIR_TEMPLATE = "%s/%%s/icons" % GLOBE_ENV_DIR_TEMPLATE
SEARCH_DIR_TEMPLATE = "%s/%%s/search_db" % GLOBE_ENV_DIR_TEMPLATE
DBROOT_FILE_TEMPLATE = "%s/dbroot.v5" % GLOBE_ENV_DIR_TEMPLATE
DBROOT_DIR_TEMPLATE = "%s/%%s/dbroot" % GLOBE_ENV_DIR_TEMPLATE
DBROOT_FILE2_TEMPLATE = "%s/%%s/dbroot/dbroot_%%s_%%s" % GLOBE_ENV_DIR_TEMPLATE
POLYGON_FILE_TEMPLATE = "%s/%%s/earth/polygon.kml" % GLOBE_ENV_DIR_TEMPLATE
PACKET_INFO_TEMPLATE = "%s/packet_info.txt" % GLOBE_ENV_DIR_TEMPLATE
QTNODES_FILE_TEMPLATE = "%s/qt_nodes.txt" % GLOBE_ENV_DIR_TEMPLATE
METADATA_FILE_TEMPLATE = "%s/%%s/earth/metadata.json" % GLOBE_ENV_DIR_TEMPLATE
# Disk space minimum in MB before we start sending warnings.
DISK_SPACE_WARNING_THRESHOLD = 1000.0
class OsCommandError(Exception):
"""Thrown if os command fails."""
pass
class DiskFullError(Exception):
"""Thrown if disk partition is too full."""
pass
class GlobeBuilder(object):
"""Class that implements all commands for cutting a globe."""
@staticmethod
def Query(db, query, parameters=None):
"""Submits the query to the database and returns tuples.
Args:
db: The database being queried.
query: SQL SELECT statement.
parameters: sequence of parameters to populate placeholders in SQL
statement.
Returns:
Results as list of lists (rows of fields).
"""
return postgres_manager_wrap.PostgresManagerWrap.Query(
db, query, parameters)
@staticmethod
def TableColumns(db, table):
"""Returns list of column names for the given table in the given db."""
query = ("SELECT column_name FROM INFORMATION_SCHEMA.columns "
"WHERE table_name=%s")
return GlobeBuilder.Query(db, query, (table,))
@staticmethod
def PrintTable(column_names, data):
"""Save table data to a file."""
fix_geom = False
# Write out the column names.
# Change the_geom column to lat and lon columns.
if column_names[-1] == "the_geom":
column_names[-1] = "lon"
column_names.append("lat")
fix_geom = True
print "\t".join(column_names)
# Write out each row of data.
# Convert POINT() data to a lat and lon column.
for row in data:
if fix_geom:
out_row = list(row[:-1])
point = row[-1]
lonlat = point[6:-1].split(" ")
out_row.extend(lonlat)
print "\t".join(out_row)
else:
print "\t".join(row)
@staticmethod
def Status(message):
"""Outputs a status message."""
print "<br>%s" % common.utils.HtmlEscape(message)
@staticmethod
def StatusWarning(message):
"""Outputs a status message."""
print ("<br><span class='text_status_warn'>%s</span>" %
common.utils.HtmlEscape(message))
def AddGlobeDirectory(self, description):
"""Add directory where globe will be built."""
# Add early for info file.
try:
os.makedirs(self.plugin_dir)
except os.error:
pass # Directory may already exist
try:
os.makedirs(self.dbroot_dir)
except os.error:
pass # Directory may already exist
self.CreateInfoFile()
if description:
self.AppendInfoFile("Globe description: %s" % description)
self.Status("Description: %s" % description)
else:
self.Status("No description given.")
self.logger.Log("Added globe directory: %s" % self.globe_dir)
self.Status("Added globe directory: %s" % self.globe_dir)
def SavePolygon(self, polygon):
"""Save polygon kml to a file."""
with open(self.polygon_file, "w") as fp:
if polygon:
# Check XML validity and standardize representation
xml = etree2.ElementTree(etree.fromstring(polygon))
xml.write(fp, xml_declaration=True, encoding='UTF-8')
self.Status("Saved polygon to %s" % self.polygon_file)
else:
self.Status("Created empty polygon file %s" % self.polygon_file)
def ConvertPolygonToQtNodes(self, polygon_level, is_mercator=False):
"""Convert polygon into a set of qt nodes at given level."""
self.Status("Convert polygon to quadtree nodes ...")
try:
os.remove(self.qtnodes_file)
except OSError:
pass # Ok, if file isn't there.
os_cmd = ("%s/gepolygontoqtnodes --qt_nodes_file=\"%s\" "
"--kml_polygon_file=\"%s\" --max_level=%d"
% (COMMAND_DIR, self.qtnodes_file, self.polygon_file,
polygon_level))
if is_mercator:
os_cmd += " --mercator"
common.utils.ExecuteCmd(os_cmd, self.logger)
fp = open(self.qtnodes_file)
self.Status("%d qtnodes" % len(fp.readlines()))
fp.close()
@staticmethod
def ConvertPolygonToPsql(polygon):
"""Convert polygon kml into PostGIS polygon text representation.
Args:
polygon: polygon in kml format.
Returns:
PostGIS polygon text representation string.
"""
if not polygon:
return ""
parsed_polygon = polygon[polygon.find("<Polygon>"):
polygon.find("</Polygon>")].strip()
parsed_polygon = (parsed_polygon[parsed_polygon.find("<coordinates>")
+ len("<coordinates>"):
parsed_polygon.find("</coordinates>")])
parsed_polygon = parsed_polygon.strip()
coordinates = parsed_polygon.split(" ")
if len(coordinates) < 4:
return ""
# Note: item of coordinates-list may have 2 or 3 coordinates.
postgis_polygon = ", ".join(
["%s %s" % tuple(coord.split(",")[:2]) for coord in coordinates])
postgis_polygon = "POLYGON((%s))" % postgis_polygon
return postgis_polygon
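# Example (illustrative coordinates): a KML <coordinates> element containing
#   "-122.10,37.40,0 -122.00,37.40,0 -122.00,37.50,0 -122.10,37.40,0"
# is converted to the PostGIS text representation
#   "POLYGON((-122.10 37.40, -122.00 37.40, -122.00 37.50, -122.10 37.40))"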
def RewriteDbRoot(self, source, include_historical):
"""Executes command to rewrite the dbroot and extract the icons it uses."""
self.Status("Rewrite dbroot ...")
historical_flag = '--disable_historical'
if include_historical:
historical_flag = ''
os_cmd = ("%s/gerewritedbroot --source=\"%s\" --icon_directory=\"%s\" "
"--dbroot_file=\"%s\" --search_service=\"%s\" "
"--kml_map_file=\"%s\" "
"%s"
% (COMMAND_DIR, source, self.icons_dir, self.dbroot_file,
self.search_service, self.kml_map_file,
historical_flag))
common.utils.ExecuteCmd(os_cmd, self.logger)
self.Status("%d icons" % len(os.listdir(self.icons_dir)))
os_cmd = ("cp \"%s\" \"%s\""
% (self.dbroot_file, self.dbroot_file2))
common.utils.ExecuteCmd(os_cmd, self.logger)
def GrabKml(self, source):
"""Recursively grabs all kml files referenced in the dbroot."""
self.Status("Grab kml files ...")
os_cmd = (("%s/gekmlgrabber --kml_map_file=\"%s\" --output_directory=\"%s\""
" --source=\"%s\"")
% (COMMAND_DIR, self.kml_map_file, self.kml_dir, source))
common.utils.ExecuteCmd(os_cmd, self.logger)
self.Status("%d kml files" % len(os.listdir(self.kml_dir)))
def BuildGlobe(self, source, default_level, max_level):
"""Executes command to cut globe and save data into packet bundles."""
self.Status("Build globe ...")
# Run this task as a background task.
os_cmd = ("%s/geportableglobebuilder --source=\"%s\" --default_level=%d "
"--max_level=%d --hires_qt_nodes_file=\"%s\" "
"--globe_directory=\"%s\" --dbroot_file=\"%s\" --metadata_file=\"%s\" >\"%s\""
% (COMMAND_DIR, source, default_level, max_level,
self.qtnodes_file, self.globe_dir, self.dbroot_file,
self.metadata_file,
self.packet_info_file))
common.utils.ExecuteCmdInBackground(os_cmd, self.logger)
def BuildMap(self, source, default_level, max_level, ignore_imagery_depth):
"""Executes command to cut map and save data into packet bundles."""
self.Status("Build map ...")
ignore_imagery_depth_str = str()
if ignore_imagery_depth:
ignore_imagery_depth_str = "--ignore_imagery_depth"
# Run this task as a background task.
# Having trouble with permissions if output is redirected to a file.
os_cmd = ("%s/geportablemapbuilder "
"%s "
"--source=\"%s\" "
"--hires_qt_nodes_file=\"%s\" "
"--map_directory=\"%s\" --default_level=%d --max_level=%d "
"--metadata_file=\"%s\" "
% (COMMAND_DIR, ignore_imagery_depth_str, source,
self.qtnodes_file, self.globe_dir, default_level,
max_level, self.metadata_file))
common.utils.ExecuteCmdInBackground(os_cmd, self.logger)
# TODO: Get real packet numbers for imagery and vectors.
fp = open(self.packet_info_file, "w")
fp.write("1 0 0")
fp.close()
def CreateInfoFile(self):
"""Create globe info file."""
fp = open(self.info_file, "w")
fp.write("Portable Globe\n")
fp.write("Copyright 2017 Google Inc.\nLicensed under the Apache License, Version 2.0.\n")
fp.write("portable %s:%s\n" % (self.portable_server, self.portable_port))
fp.write("search %s\n" % (self.search_service))
fp.write("kml %s:%s\n" % (self.kml_server, self.kml_port))
fp.write("%s\n" % common.utils.GmTimeStamp())
fp.close()
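# The resulting info file resembles (host, port and service values are
# illustrative, taken from the defaults above):
#   Portable Globe
#   Copyright 2017 Google Inc.
#   Licensed under the Apache License, Version 2.0.
#   portable localhost:9335
#   search Portable3dPoiSearch
#   kml localhost:9335
#   <GMT timestamp>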
def AppendInfoFile(self, message):
"""Create globe info file."""
self.logger.Log(message)
fp = open(self.info_file, "a")
fp.write(common.utils.TimeStamp())
fp.write("%s\n\n" % message)
fp.close()
def HttpGet(self, url):
"""Make an HTTP or HTTPS request with a context for checking or ignoring
certificate errors depending on Config settings.
"""
context = None
response_data = ""
http_status_code = 0
try:
# TODO: When Python 2.7 is used on CentOS 6, this version <= 2.6 block can be
# removed and the 'else' ssl.SSLContext-based block can be used instead.
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
with closing(urllib2.urlopen(url)) as fp:
http_status_code = fp.getcode()
response_data = fp.read()
else:
# Set the context based on cert requirements
if CONFIGS.GetBool("VALIDATE_CERTIFICATE"):
cert_file = CONFIGS.GetStr("CERTIFICATE_CHAIN_PATH")
key_file = CONFIGS.GetStr("CERTIFICATE_KEY_PATH")
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(cert_file, keyfile=key_file)
else:
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
# Pass the SSL context so the certificate settings above take effect.
with closing(urllib2.urlopen(url, context=context)) as fp:
http_status_code = fp.getcode()
response_data = fp.read()
except:
GlobeBuilder.StatusWarning("FAILED: Caught exception reading {0}".format(url))
raise
return (response_data, http_status_code)
def AddJsonFile(self, source, is_map, portable_server, json_file):
"""Get JSON from server and add it to the portable globe plugin files.
Args:
source: server URL.
is_map: whether it is a 2d map server/database:
true - 2d map, false - 3d globe.
portable_server: portable server URL.
json_file: path to json file to write in.
Returns:
updated {maps,earth}.json content.
"""
# Get JSON from the server.
url = "%s/query?request=Json&var=geeServerDefs" % source
self.Status("Rewrite JSON from: %s to: %s" % (url, json_file))
json, code = self.HttpGet(url)
if (code != 200):
raise Exception("GET {0} failed with {1}".format(url, code))
# Replace all urls to point at the portable server.
start = 0
new_json = ""
is_altered = False
for match in re.finditer(r"([\w\-]+)\s*:\s*\"http[s]?:"
"//[\w\-\.]+(:\d+)?/([^/\n]+)([^\n\"]*)\"",
json, 0):
spans = match.span()
new_json += json[start:spans[0]]
new_json += ("%s : \"%s%s\"" %
(match.groups()[0], portable_server, match.groups()[3]))
start = spans[1]
is_altered = True
new_json += json[start:]
json = new_json
# Replace the serverUrl parameter with the plain Portable Server url.
# This allows simple replacement by the Portable Server depending on
# the context.
new_json = ""
start = 0
for match in re.finditer(r"serverUrl\s*:\s*\"[^\n\"]*\"", json, 0):
spans = match.span()
new_json += json[start:spans[0]]
new_json += ("serverUrl : \"%s\"" % portable_server)
start = spans[1]
is_altered = True
break
new_json += json[start:]
if is_map:
if new_json.find("ImageryMapsMercator") >= 0:
# MotF is not supported in Portable Server, and we don't need it
# since we are getting the tiles already converted to the Mercator
# projection.
new_json = new_json.replace("ImageryMapsMercator", "ImageryMaps")
is_altered = True
# Get search tabs from server and insert into ServerDefs.
search_tabs = globe_cutter.GlobeCutter.GetSearchTabs(source)
if search_tabs:
combined_json = globe_cutter.GlobeCutter.AddSearchTabsToServerDefs(
new_json, search_tabs)
if combined_json:
new_json = combined_json
is_altered = True
if not is_altered:
print "Json has not been altered."
# Write modified JSON to portable server file.
fp = open(json_file, "w")
fp.write(new_json)
fp.close()
return new_json
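# Example rewrite (hypothetical server defs): an entry such as
#   url : "http://fusion.example.com/my_db/query"
# becomes
#   url : "http://localhost:9335/query"
# when the portable server address is http://localhost:9335, and serverUrl is
# replaced with that plain portable server address.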
def AddJsonIcons(self, source, json):
"""Get icons from JSON and add them to the map."""
# Add icon directory if needed.
try:
os.makedirs(self.icons_dir)
except os.error:
pass # Directory may already exist
# Get all of the icons from the json, ignoring duplicates.
icons = {}
for match in re.finditer("icon\s*:\s*\"icons/(.*)\"",
json, 0):
icons[match.groups()[0]] = True
for icon in icons.iterkeys():
# Get JSON from the server.
url = "%s/query?request=Icon&icon_path=icons/%s" % (source, icon)
try:
data, code = self.HttpGet(url)
if code == 200:
with open("%s/%s" % (self.icons_dir, icon), "w") as fpw:
fpw.write(data)
else:
raise Exception("Cannot fetch {0}".format(url))
except:
self.Status("Unable to retrieve icon %s" % icon)
def AddPluginFiles(self, source, is_map):
"""Copies files associated with the Google Earth browser plug-in."""
self.Status("Add plugin files ...")
# Plugin directory should already exist.
common.utils.CopyDirectory("%s/earth" % TEMPLATE_DIR, self.plugin_dir,
self.logger)
common.utils.CopyDirectory("%s/maps" % TEMPLATE_DIR, self.maps_dir,
self.logger)
common.utils.CopyDirectory("%s/js" % TEMPLATE_DIR, self.js_dir, self.logger)
# Get the Json that defines the plugin params.
if is_map:
json = self.AddJsonFile(
source, is_map, self.json_address, self.json_map_file)
self.AddJsonIcons(source, json)
else:
self.AddJsonFile(source, is_map, self.json_address, self.json_earth_file)
def BuildSearchDb(self, source, polygon):
"""Extracts database info needed for POI search."""
self.Status("Extract search data ...")
try:
os.makedirs(self.search_dir)
except os.error:
pass # Directory may already exist
# Determine the server and target path (fusion db publish point) from
# the source.
target = ""
server = ""
if source:
server, target = common.utils.GetServerAndPathFromUrl(source)
# Replace the server with advanced configuration host
server = CONFIGS.GetStr("DATABASE_HOST")
target = common.utils.NormalizeTargetPath(target)
base_url = "%s/cgi-bin/globe_cutter_app.py" % server
url = "%s?cmd=POI_IDS&target=%s" % (base_url, target)
self.Status("Querying search poi ids: target=%s" % target)
poi_list = None
try:
data, http_status_code = self.HttpGet(url)
if http_status_code == 200:
poi_list = data.strip()
except Exception as e:
raise Exception("Request failed: cannot connect to server: {0}".format(e))
if poi_list:
# Quote polygon parameter for URI.
polygon_quoted = ""
if polygon:
polygon_quoted = urllib.quote(polygon)
poi_ids = poi_list.split(" ")
for poi_id in poi_ids:
url = ("%s?cmd=SEARCH_FILE&poi_id=%s&polygon=%s" %
(base_url, poi_id, polygon_quoted))
search_file = "%s/gepoi_%s" % (self.search_dir, poi_id)
try:
self.Status("Querying search poi data: poi_id=%s, polygon=%s" %
(poi_id, polygon))
data, http_status_code = self.HttpGet(url)
if http_status_code == 200:
self.Status("Copying search poi data: gepoi_%s to globe" % poi_id)
with open(search_file, "w") as fpw:
fpw.write(data.strip())
fpw.write("\n")
else:
self.StatusWarning(data)
except IOError as e:
self.StatusWarning(
"Unable to write search file: %s. Error: %s" % (search_file, e))
except Exception as e:
self.StatusWarning("Unable to get search data: gepoi_%s. Error: %s" %
(poi_id, e))
else:
self.Status("No search data.")
def BuildSearchFile(self, poi_id, polygon):
"""Extracts database info needed for POI search.
Args:
poi_id: index of POI table.
polygon: area of interest (polygon in kml-format).
"""
# Converts polygon from kml format to PostGIS.
postgis_polygon = GlobeBuilder.ConvertPolygonToPsql(polygon)
# Make polygon parameter required.
if not postgis_polygon:
return
# Get column names of gepoi table and prepare for query.
table_name = "gepoi_%d" % poi_id
columns = self.TableColumns("gepoi", table_name)
# TODO: no results - consider returning 204 No Content.
if not columns:
return
query_cols = ",".join(columns)
query_cols = query_cols.replace("the_geom", "ST_AsText(the_geom)")
# Issue SQL query to get search data.
query = """SELECT %s FROM %s
WHERE ST_CONTAINS (ST_PolygonFromText(%%s, 4326), the_geom)"""
query %= (query_cols, table_name)
results = self.Query("gepoi", query, (postgis_polygon,))
# TODO: no results - consider returning 204 No Content.
if results:
self.PrintTable(columns, results)
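# Illustrative only: for a hypothetical table gepoi_3 with columns
# (id, name, the_geom), the query issued above expands to roughly
#   SELECT id,name,ST_AsText(the_geom) FROM gepoi_3
#   WHERE ST_CONTAINS (ST_PolygonFromText('POLYGON((...))', 4326), the_geom)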
def ListPoiIds(self, target):
"""Lists ids of tables for POI search."""
# db_id's do NOT match across the gesearch and gestream databases.
# TODO: Starting in 5.0 we are reuniting POI search
# with its source database (i.e. one target), so
# consider more unification within Postgres.
# E.g. You should at least be able to get the reference from the
# bridging table like:
# SELECT db_id
# FROM target_table t, target_db_table td
# WHERE
# t.target_path = '/%s' AND
# t.target_id = td.target_id
# Then getting the poi ids would simply be:
# SELECT poi_id FROM db_poi_table
# WHERE db_id = %s"""
#
# But unifying the tables so that single-step joins can be done
# is probably better given our simple schema structure.
query = """SELECT host_name, db_name
FROM target_db_table td, db_table d, target_table t
WHERE t.target_path = %s AND
t.target_id = td.target_id AND
td.db_id = d.db_id
"""
result = self.Query("gestream", query, (target,))
if result:
(db_host, db_name) = result[0]
query = """SELECT poi_id
FROM db_poi_table dp, db_table d
WHERE
d.host_name = %s AND
d.db_name = %s AND
d.db_id = dp.db_id
"""
poi_id = self.Query("gesearch", query, (db_host, db_name))
else:
poi_id = []
print " ".join(map(str, poi_id))
def PackageGlobeForDownload(self, make_copy, is_map=False):
"""Packages globe or map as a single-file globe."""
if is_map:
self.Status("Packaging map for download ...")
is_2d_str = "--is_2d"
out_file = self.map_file
else:
self.Status("Packaging globe for download ...")
is_2d_str = ""
out_file = self.globe_file
# Remove old globe or map.
try:
os.remove(out_file)
except OSError:
pass # Globe or map may not exist.
make_copy_str = ""
if make_copy:
make_copy_str = "--make_copy"
os_cmd = ("%s/geportableglobepacker --globe_directory=\"%s\" "
"--output=\"%s\" %s %s"
% (COMMAND_DIR, self.globe_dir, out_file,
make_copy_str, is_2d_str))
new_globe_size = common.utils.DirectorySize(self.globe_env_dir)
globe_dir_space = common.utils.DiskSpace(os.path.dirname(out_file))
if globe_dir_space < new_globe_size:
self.StatusWarning(
("Not enough room to create %s. %s required."
"<br>Did not execute:<br>%s")
% (out_file, common.utils.SizeAsString(new_globe_size),
os_cmd))
raise DiskFullError("Disk is full at %s"
% os.path.dirname(out_file))
common.utils.ExecuteCmd(os_cmd, self.logger)
os_cmd = ("chmod a+r \"%s\"" % out_file)
common.utils.ExecuteCmd(os_cmd, self.logger)
self.Status("%s %s" % (out_file,
common.utils.FileSizeAsString(out_file)))
def CleanUp(self, save_temp):
"""Clean up temporary directory."""
try:
shutil.rmtree(self.globe_final_env_dir)
except OSError:
pass # Directory may not exist
try:
if save_temp:
shutil.move(self.globe_env_dir, self.globe_final_env_dir)
self.Status("Saving tmp directory as: %s" % self.globe_final_env_dir)
else:
shutil.rmtree(self.globe_env_dir)
self.Status("Deleting tmp directory as: %s" % self.globe_env_dir)
except Exception, e:
self.StatusWarning("Error: %s" % str(e))
def CutProcesses(self):
"""Returns processes referencing the temp directory of a cut in progress."""
print "ps -ef | grep \"%s\" | grep -v grep" % self.globe_dir
procs = os.popen("ps -ef | grep \"%s\" | grep -v grep" % self.globe_dir)
procs_info = []
for proc in procs:
proc_info = re.compile(r"\s+").split(proc)
procs_info.append([int(proc_info[1]), " ".join(proc_info[7:])])
return procs_info
def CancelCut(self, save_temp):
"""Kill processes referencing the temp directory of a cut in progress."""
for proc_info in self.CutProcesses():
print "Killing: (%d) %s " % (proc_info[0], proc_info[1])
os.kill(proc_info[0], 1)
self.CleanUp(save_temp)
def GetServers(self):
"""Get names and urls for globes being served on this EarthServer."""
os_cmd = ("%s/geserveradmin --listvss") % COMMAND_DIR
vss_regex = re.compile("\d+\.\s*(.*?),\s+(.*?),\s+(.*?)\s*$")
fp = os.popen(os_cmd)
servers = {}
for line in fp:
match = vss_regex.match(line)
if match and match.group(2) == "ge":
servers[match.group(1)] = [match.group(3), "false"]
elif match and match.group(2) == "map":
servers[match.group(1)] = [match.group(3), "true"]
fp.close()
os_cmd = ("%s/geserveradmin --publisheddbs") % COMMAND_DIR
vss_regex = re.compile("\s*Virtual\s+Server:\s*(.*)$")
fp = os.popen(os_cmd)
print "["
server_entries = []
for line in fp:
match = vss_regex.match(line)
if match and match.group(1) in servers.keys():
server = "{"
server += '"name": "%s", ' % match.group(1)
server += '"url": "%s", ' % servers[match.group(1)][0]
server += '"is_2d": %s' % servers[match.group(1)][1]
server += "}"
server_entries.append(server)
print ",\n".join(server_entries)
print "]"
fp.close()
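# Example output (illustrative names and URLs):
#   [
#   {"name": "default_ge", "url": "http://host/default_ge", "is_2d": false},
#   {"name": "default_map", "url": "http://host/default_map", "is_2d": true}
#   ]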
def GetDirectorySize(self):
"""Get size of directory for globe being built."""
size = common.utils.DirectorySizeAsString(self.globe_env_dir)
if size == "0.00MB":
return ""
else:
return size
def CheckArgs(self, arg_list, form_):
"""Checks that required arguments are available from form.
Also sets up all of the global paths based on the
globe name.
Args:
arg_list: Arguments passed from the GET or POST call.
form_: HTML form from which the arguments came.
"""
missing = ""
uid = form_.getvalue("uid")
for arg in arg_list:
if arg == "source":
value = form_.getvalue_url(arg)
elif arg == "polygon":
value = form_.getvalue_kml(arg)
elif arg == "globe_name":
value = form_.getvalue_filename(arg)
else:
value = form_.getvalue(arg)
if not value:
missing = "%s %s" % (missing, arg)
elif arg == "globe_name":
self.dbroot_file = DBROOT_FILE_TEMPLATE % (value, uid)
self.polygon_file = POLYGON_FILE_TEMPLATE % (value, uid, value)
self.qtnodes_file = QTNODES_FILE_TEMPLATE % (value, uid)
self.packet_info_file = PACKET_INFO_TEMPLATE % (value, uid)
self.globe_env_dir = GLOBE_ENV_DIR_TEMPLATE % (value, uid)
self.globe_final_env_dir = GLOBE_FINAL_ENV_DIR_TEMPLATE % value
self.globe_dir = GLOBE_DIR_TEMPLATE % (value, uid, value)
self.icons_dir = ICONS_DIR_TEMPLATE % (value, uid, value)
self.plugin_dir = PLUGIN_DIR_TEMPLATE % (value, uid, value)
self.maps_dir = MAPS_DIR_TEMPLATE % (value, uid, value)
self.json_earth_file = JSON_EARTH_FILE_TEMPLATE % (value, uid, value)
self.json_map_file = JSON_MAP_FILE_TEMPLATE % (value, uid, value)
self.info_file = INFO_FILE_TEMPLATE % (value, uid, value)
self.js_dir = JS_DIR_TEMPLATE % (value, uid, value)
self.kml_map_file = KML_MAP_FILE_TEMPLATE % (value, uid)
self.kml_dir = KML_DIR_TEMPLATE % (value, uid, value)
self.icons_dir = ICONS_DIR_TEMPLATE % (value, uid, value)
self.search_dir = SEARCH_DIR_TEMPLATE % (value, uid, value)
self.globe_file = GLOBE_FILE_TEMPLATE % value
self.map_file = MAP_FILE_TEMPLATE % value
self.metadata_file = METADATA_FILE_TEMPLATE % (value, uid, value)
self.logger = common.utils.Log(LOG_FILE % (value, uid))
form_keys = form_.keys()
if PORTABLE_PREFIX_PARAM in form_keys:
self.portable_prefix = form_.getvalue(PORTABLE_PREFIX_PARAM)
else:
self.portable_prefix = DEFAULT_PORTABLE_PREFIX
if PORTABLE_SERVER_PARAM in form_keys:
self.portable_server = form_.getvalue(PORTABLE_SERVER_PARAM)
else:
self.portable_server = DEFAULT_PORTABLE_SERVER
if PORTABLE_PORT_PARAM in form_keys:
self.portable_port = form_.getvalue(PORTABLE_PORT_PARAM)
else:
self.portable_port = DEFAULT_PORTABLE_PORT
if SOURCE_GLOBE_PARAM in form_keys:
self.source_globe = form_.getvalue_path(SOURCE_GLOBE_PARAM)
else:
self.source_globe = DEFAULT_SOURCE_GLOBE
if SEARCH_SERVICE_PARAM in form_keys:
self.search_service = form_.getvalue(SEARCH_SERVICE_PARAM)
else:
self.search_service = DEFAULT_SEARCH_SERVICE
if KML_SERVER_PARAM in form_keys:
self.kml_server = form_.getvalue(KML_SERVER_PARAM)
else:
self.kml_server = self.portable_server
if KML_PORT_PARAM in form_keys:
self.kml_port = form_.getvalue(KML_PORT_PARAM)
else:
self.kml_port = self.portable_port
self.html_address = "%s://%s:%s" % (self.portable_prefix,
self.portable_server,
self.portable_port)
self.json_address = "%s://%s:%s" % (self.portable_prefix,
self.portable_server,
self.portable_port)
self.dbroot_dir = DBROOT_DIR_TEMPLATE % (value, uid, value)
self.dbroot_file2 = DBROOT_FILE2_TEMPLATE % (value, uid, value,
self.portable_server,
self.portable_port)
if missing:
self.StatusWarning("Missing args: %s" % missing)
raise Exception("Missing args: %s" % missing)
if __name__ == "__main__":
common.utils.WriteHeader("text/plain")
msg = "Ok"
globe_builder = GlobeBuilder()
# Add directory where globe will be built.
try:
# Put the GET arguments into a dictionary.
cgi_cmd = FORM.getvalue("cmd")
if cgi_cmd == "ADD_GLOBE_DIRECTORY":
globe_builder.CheckArgs(["globe_name"], FORM)
globe_builder.AddGlobeDirectory(FORM.getvalue_escaped("description"))
elif cgi_cmd == "POLYGON_TO_QTNODES":
is_mercator = FORM.getvalue("is_mercator")
globe_builder.CheckArgs(["globe_name", "polygon_level", "polygon"], FORM)
globe_builder.SavePolygon(FORM.getvalue_kml("polygon"))
globe_builder.ConvertPolygonToQtNodes(
int(FORM.getvalue("polygon_level")), is_mercator == "t")
elif cgi_cmd == "REWRITE_DB_ROOT":
globe_builder.CheckArgs(["globe_name", "source"], FORM)
include_historic = FORM.getvalue("include_historical_imagery") is not None
globe_builder.RewriteDbRoot(FORM.getvalue_url("source"), include_historic)
elif cgi_cmd == "GRAB_KML":
globe_builder.CheckArgs(["globe_name", "source"], FORM)
globe_builder.GrabKml(FORM.getvalue_url("source"))
elif cgi_cmd == "BUILD_GLOBE":
ignore_imagery_depth = (FORM.getvalue("ignore_imagery_depth") is not None)
globe_builder.CheckArgs(["globe_name", "source", "default_level",
"max_level"], FORM)
is_2d = FORM.getvalue("is_2d")
if is_2d == "t":
globe_builder.BuildMap(FORM.getvalue_url("source"),
int(FORM.getvalue("default_level")),
int(FORM.getvalue("max_level")),
ignore_imagery_depth)
else:
globe_builder.BuildGlobe(FORM.getvalue_url("source"),
int(FORM.getvalue("default_level")),
int(FORM.getvalue("max_level")))
elif cgi_cmd == "EXTRACT_SEARCH_DB":
globe_builder.CheckArgs(["globe_name", "source", "polygon"], FORM)
globe_builder.BuildSearchDb(FORM.getvalue_url("source"),
FORM.getvalue_kml("polygon"))
elif cgi_cmd == "POI_IDS":
target_path = common.utils.NormalizeTargetPath(
FORM.getvalue_url("target"))
globe_builder.ListPoiIds(target_path)
msg = ""
elif cgi_cmd == "SEARCH_FILE":
globe_builder.CheckArgs(["poi_id", "polygon"], FORM)
# Note: a value 0 for poi_id is invalid.
poi_id_arg = int(FORM.getvalue("poi_id", 0))
polygon_arg = FORM.getvalue_kml("polygon")
# TODO: return error 400 "Bad request" on else.
if poi_id_arg and polygon_arg:
globe_builder.BuildSearchFile(poi_id_arg, polygon_arg)
msg = ""
elif cgi_cmd == "ADD_PLUGIN_FILES":
is_2d = FORM.getvalue("is_2d")
globe_builder.CheckArgs(["globe_name", "source"], FORM)
globe_builder.AddPluginFiles(FORM.getvalue_url("source") , is_2d)
elif cgi_cmd == "PACKAGE_GLOBE":
globe_builder.CheckArgs(["globe_name"], FORM)
is_2d = FORM.getvalue("is_2d")
globe_builder.PackageGlobeForDownload(FORM.getvalue("save_tmp") == "t",
is_2d == "t")
elif cgi_cmd == "CLEAN_UP":
globe_builder.CheckArgs(["globe_name"], FORM)
globe_builder.CleanUp(FORM.getvalue("save_tmp") == "t")
elif cgi_cmd == "CANCEL":
globe_builder.CheckArgs(["globe_name", "uid"], FORM)
globe_builder.CancelCut(FORM.getvalue("save_tmp") == "t")
elif cgi_cmd == "GLOBE_INFO":
globe_builder.CheckArgs(["globe_name"], FORM)
globe_name = FORM.getvalue_filename("globe_name")
is_2d = FORM.getvalue("is_2d")
if is_2d == "t":
print ("<hr>Your map is available at <a href=\"%s/%s.glm\">%s</a>." %
(WEB_URL_BASE, globe_name, globe_name))
globe_size = common.utils.FileSizeAsString(globe_builder.map_file)
else:
print ("<hr>Your globe is available at <a href=\"%s/%s.glb\">%s</a>." %
(WEB_URL_BASE, globe_name, globe_name))
globe_size = common.utils.FileSizeAsString(globe_builder.globe_file)
print "<br> Size: %s" % globe_size
globe_builder.AppendInfoFile(globe_size)
msg = ""
elif cgi_cmd == "UID":
allow_overwrite = (FORM.getvalue("allow_overwrite") is not None)
overwriting = False
globe_name = FORM.getvalue_filename("globe_name")
new_globe_name = globe_name
is_2d = FORM.getvalue("is_2d")
if is_2d == "t":
file_template = MAP_FILE_TEMPLATE
else:
file_template = GLOBE_FILE_TEMPLATE
globe_file = file_template % globe_name
if os.path.isfile(globe_file):
# If overwriting is allowed, just give a warning.
if allow_overwrite:
overwriting = True
# Otherwise, find an unused name and reserve it.
else:
file_num = 1
while os.path.isfile(globe_file):
new_globe_name = GLOBE_NAME_TEMPLATE % (globe_name,
"%03d" % file_num)
globe_file = file_template % new_globe_name
file_num += 1
fp_temp = open(globe_file, "w")
fp_temp.close()
msg = "%s %s %s" % (common.utils.Uid(), new_globe_name, overwriting)
elif cgi_cmd == "SERVERS":
globe_builder.GetServers()
msg = ""
elif cgi_cmd == "PING":
print "Ready to cut."
elif cgi_cmd == "BUILD_SIZE":
globe_builder.CheckArgs(["globe_name"], FORM)
msg = globe_builder.GetDirectorySize()
elif cgi_cmd == "BUILD_DONE":
globe_builder.CheckArgs(["globe_name"], FORM)
msg = ""
is_2d = FORM.getvalue("is_2d")
tool_name = (
"geportablemapbuilder" if is_2d == "t" else "geportableglobebuilder")
# To add polling for other command, look for them here
# as is done for "geportableglobebuilder."
# Then add a "wait_for_task" in the sequence after the
# command in globe_cutter.js.
if common.utils.IsProcessRunningForGlobe(
tool_name, globe_builder.globe_env_dir):
msg = "t"
time.sleep(int(FORM.getvalue("delay", 0)))
else:
packet_file = open(globe_builder.packet_info_file)
packet_info = packet_file.readline().split(" ")
packet_file.close()
if len(packet_info) == 3 and int(packet_info[0]) > 0:
globe_builder.logger.Log("%s imagery packets "
"%s terrain packets "
"%s vector packets "
% tuple(packet_info))
msg = " ".join(packet_info)
else:
globe_builder.logger.Log("FAILED: %s did"
" not complete properly." % tool_name)
msg = "FAILED %s %s" % (globe_builder.packet_info_file,
packet_info.__str__())
elif cgi_cmd == "ECHO":
print FORM.getvalue("echo")
else:
GlobeBuilder.StatusWarning("FAILED: Unknown command: %s" % cgi_cmd)
msg = ""
except OsCommandError:
GlobeBuilder.StatusWarning("FAILED: Unable to run OS command")
msg = ""
except Exception as e:
GlobeBuilder.StatusWarning("FAILED: %s" % e)
msg = ""
except:
GlobeBuilder.StatusWarning("FAILED: %s" % sys.exc_info().__str__())
msg = ""
if (cgi_cmd != "SERVERS" and cgi_cmd != "PING" and cgi_cmd != "ECHO"
and cgi_cmd != "BUILD_SIZE" and cgi_cmd != "BUILD_DONE"
and cgi_cmd != "UID"):
disk_space = common.utils.DiskSpace(BASE_DIR)
if disk_space < DISK_SPACE_WARNING_THRESHOLD:
GlobeBuilder.StatusWarning(
"WARNING: %4.1f MB of disk space remaining." % disk_space)
print msg
|
|
from direct.directnotify import DirectNotifyGlobal
from direct.gui.DirectGui import *
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
import random
import string
from toontown.fishing import FishSellGUI
from toontown.hood import ZoneUtil
from toontown.pets import Pet, PetConstants
from toontown.pets import PetDNA
from toontown.pets import PetDetail
from toontown.pets import PetNameGenerator
from toontown.pets import PetTraits
from toontown.pets import PetUtil
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownTimer
Dialog_MainMenu = 0
Dialog_AdoptPet = 1
Dialog_ChoosePet = 2
Dialog_ReturnPet = 3
Dialog_SellFish = 4
Dialog_NamePicker = 5
Dialog_GoHome = 6
disabledImageColor = Vec4(0.6, 0.6, 0.6, 1)
text0Color = Vec4(0.65, 0, 0.87, 1)
text1Color = Vec4(0.65, 0, 0.87, 1)
text2Color = Vec4(1, 1, 0.5, 1)
text3Color = Vec4(0.4, 0.4, 0.4, 1)
class PetshopGUI(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGui')
class GoHomeDlg(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('GoHomeDlg')
def __init__(self, doneEvent):
DirectFrame.__init__(self, pos=(0.0, 0.0, 0.0), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.0, 1.0, 0.6), text='', text_wordwrap=13.5, text_scale=0.06, text_pos=(0.0, 0.13))
self['image'] = DGG.getDefaultDialogGeom()
self['text'] = TTLocalizer.PetshopGoHomeText
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
self.bYes = DirectButton(self, image=(buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')), relief=None, text=TTLocalizer.TutorialYes, text_scale=0.05, text_pos=(0.0, -0.1), pos=(-0.15, 0.0, -0.1), command=lambda : messenger.send(doneEvent, [1]))
self.bNo = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.TutorialNo, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0.15, 0.0, -0.1), command=lambda : messenger.send(doneEvent, [0]))
buttons.removeNode()
gui.removeNode()
class NamePicker(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.NamePicker')
def __init__(self, doneEvent, petSeed, gender):
zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
name, dna, traitSeed = PetUtil.getPetInfoFromSeed(petSeed, zoneId)
self.gui = loader.loadModel('phase_4/models/gui/PetNamePanel')
self.guiScale = 0.09
DirectFrame.__init__(self, relief=None, geom=self.gui, geom_scale=self.guiScale, state='normal', frameSize=(-1, 1, -1, 1))
self.initialiseoptions(PetshopGUI.NamePicker)
self.petView = self.attachNewNode('petView')
self.petView.setPos(-0.21, 0, -0.04)
self.petModel = Pet.Pet(forGui=1)
self.petModel.setDNA(dna)
self.petModel.fitAndCenterHead(0.435, forGui=1)
self.petModel.reparentTo(self.petView)
self.petModel.setH(225)
self.petModel.enterNeutralHappy()
self.ng = PetNameGenerator.PetNameGenerator()
if gender == 1:
self.allNames = self.ng.boyFirsts
else:
self.allNames = self.ng.girlFirsts
self.allNames += self.ng.neutralFirsts
self.allNames.sort()
self.checkNames()
self.letters = []
for name in self.allNames:
if name[0:TTLocalizer.PGUIcharLength] not in self.letters:
self.letters.append(name[0:TTLocalizer.PGUIcharLength])
self.curLetter = self.letters[0]
self.curNames = []
self.curName = ''
self.alphabetList = self.makeScrollList(self.gui, (-0.012, 0, -0.075), (1, 0.8, 0.8, 1), self.letters, self.makeLabel, [TextNode.ACenter, 'alphabet'], 6)
self.nameList = None
self.rebuildNameList()
self.randomButton = DirectButton(parent=self, relief=None, image=(self.gui.find('**/RandomUpButton'), self.gui.find('**/RandomDownButton'), self.gui.find('**/RandomRolloverButton')), scale=self.guiScale, text=TTLocalizer.RandomButton, text_pos=(-0.8, -5.7), text_scale=0.8, text_fg=text2Color, pressEffect=False, command=self.randomName)
self.nameResult = DirectLabel(parent=self, relief=None, scale=self.guiScale, text='', text_align=TextNode.ACenter, text_pos=(-1.85, 2.6), text_fg=text0Color, text_scale=0.6, text_wordwrap=8)
self.submitButton = DirectButton(parent=self, relief=None, image=(self.gui.find('**/SubmitUpButton'), self.gui.find('**/SubmitDownButton'), self.gui.find('**/SubmitRolloverButton')), scale=self.guiScale, text=TTLocalizer.PetshopAdopt, text_pos=(3.3, -5.7), text_scale=TTLocalizer.PGUIsubmitButton, text_fg=text0Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [self.ng.returnUniqueID(self.curName)]))
model = loader.loadModel('phase_4/models/gui/PetShopInterface')
modelScale = 0.1
cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelButtonRollover'))
cancelIcon = model.find('**/CancelIcon')
self.cancelButton = DirectButton(parent=self, relief=None, pos=(-0.04, 0, -0.47), image=cancelImageList, geom=cancelIcon, scale=modelScale, pressEffect=False, command=lambda : messenger.send(doneEvent, [-1]))
self.randomName()
def checkNames(self):
if __dev__:
for name in self.allNames:
if not name.replace(' ', '').isalpha():
self.notify.warning('Bad name:%s' % name)
def destroy(self):
self.petModel.delete()
DirectFrame.destroy(self)
def rebuildNameList(self):
self.curNames = []
for name in self.allNames:
if name[0:TTLocalizer.PGUIcharLength] == self.curLetter:
self.curNames += [name]
if self.nameList:
self.nameList.destroy()
self.nameList = self.makeScrollList(self.gui, (0.277, 0, -0.075), (1, 0.8, 0.8, 1), self.curNames, self.makeLabel, [TextNode.ACenter, 'name'], 5)
def updateNameText(self):
self.nameResult['text'] = self.curName
def nameClickedOn(self, listType, index):
if listType == 'alphabet':
self.curLetter = self.letters[index]
self.rebuildNameList()
elif listType == 'name':
self.curName = self.curNames[index]
self.updateNameText()
def makeLabel(self, te, index, others):
alig = others[0]
listName = others[1]
if alig == TextNode.ARight:
newpos = (0.44, 0, 0)
elif alig == TextNode.ALeft:
newpos = (0, 0, 0)
else:
newpos = (0.2, 0, 0)
df = DirectButton(parent=self, state='normal', relief=None, text=te, text_scale=0.1, text_pos=(0.2, 0, 0), text_align=alig, textMayChange=0, command=lambda : self.nameClickedOn(listName, index))
return df
def makeScrollList(self, gui, ipos, mcolor, nitems, nitemMakeFunction, nitemMakeExtraArgs, nVisibleItems):
decScale = self.guiScale / 0.44
incScale = (decScale, decScale, -decScale)
it = nitems[:]
listType = nitemMakeExtraArgs[1]
if listType == 'alphabet':
arrowList = (gui.find('**/ArrowSmUpButton'),
gui.find('**/ArrowSmUpRollover'),
gui.find('**/ArrowSmUpRollover'),
gui.find('**/ArrowSmUpButton'))
fHeight = 0.09
elif listType == 'name':
arrowList = (gui.find('**/ArrowUpBigButton'),
gui.find('**/ArrowUpBigRollover'),
gui.find('**/ArrowUpBigRollover'),
gui.find('**/ArrowUpBigButton'))
fHeight = 0.119
ds = DirectScrolledList(parent=self, items=it, itemMakeFunction=nitemMakeFunction, itemMakeExtraArgs=nitemMakeExtraArgs, relief=None, command=None, pos=ipos, scale=0.44, incButton_image=arrowList, incButton_image_pos=(1.015, 0, 3.32), incButton_relief=None, incButton_scale=incScale, incButton_image3_color=Vec4(0.4, 0.4, 0.4, 1), decButton_image=arrowList, decButton_image_pos=(1.015, 0, 1.11), decButton_relief=None, decButton_scale=decScale, decButton_image3_color=Vec4(0.4, 0.4, 0.4, 1), numItemsVisible=nVisibleItems, forceHeight=fHeight)
return ds
def randomName(self):
numNames = len(self.allNames)
self.curName = self.allNames[random.randrange(numNames)]
self.curLetter = self.curName[0:TTLocalizer.PGUIcharLength]
self.rebuildNameList()
self.updateNameText()
self.alphabetList.scrollTo(self.letters.index(self.curLetter))
self.nameList.scrollTo(self.curNames.index(self.curName))
class MainMenuDlg(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.MainMenuDlg')
def __init__(self, doneEvent):
model = loader.loadModel('phase_4/models/gui/AdoptReturnSell')
modelPos = (0, 0, -0.3)
modelScale = 0.055
DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=(modelScale, modelScale, modelScale), pos=modelPos, frameSize=(-1, 1, -1, 1))
self.initialiseoptions(PetshopGUI.MainMenuDlg)
textScale = TTLocalizer.PGUItextScale
sellFishImageList = (model.find('**/SellButtonUp'),
model.find('**/SellButtonDown'),
model.find('**/SellButtonRollover'),
model.find('**/SellButtonDown'))
fishLogoImageList = model.find('**/Fish')
cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/cancelButtonDown'), model.find('**/CancelButtonRollover'))
XImageList = model.find('**/CancelIcon')
adoptImageList = (model.find('**/AdoptButtonUp'), model.find('**/AdoptButtonDown'), model.find('**/AdoptButtonRollover'))
pawLogoAdoptImageList = model.find('**/PawPink')
returnImageList = (model.find('**/ReturnButtonUp'),
model.find('**/ReturnButtonDown'),
model.find('**/ReturnButtonRollover'),
model.find('**/ReturnButtonDown'))
pawLogoReturnImageList = model.find('**/PawYellow')
self.cancelButton = DirectButton(parent=self, relief=None, scale=(modelScale, modelScale, modelScale), geom=XImageList, image=cancelImageList, text=('', TTLocalizer.PetshopCancel), text_pos=TTLocalizer.PGUIcancelButtonPos, text_scale=0.8, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
self.sellFishButton = DirectButton(parent=self, relief=None, image=sellFishImageList, image3_color=disabledImageColor, geom=fishLogoImageList, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopSell, text_scale=textScale, text_pos=(0, 6), text0_fg=text2Color, text1_fg=text2Color, text2_fg=text0Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
fishValue = base.localAvatar.fishTank.getTotalValue()
if fishValue == 0:
self.sellFishButton['state'] = DGG.DISABLED
self.adoptPetButton = DirectButton(parent=self, relief=None, image=adoptImageList, geom=pawLogoAdoptImageList, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopAdoptAPet, text_scale=textScale, text_pos=(0, 12.5), text0_fg=text0Color, text1_fg=text1Color, text2_fg=text2Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [2]))
self.returnPetButton = DirectButton(parent=self, relief=None, image=returnImageList, geom=pawLogoReturnImageList, image3_color=disabledImageColor, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopReturnPet, text_scale=textScale, text_pos=(-0.6, 9.2), text0_fg=text2Color, text1_fg=text2Color, text2_fg=text0Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [3]))
if not base.localAvatar.hasPet():
self.returnPetButton['state'] = DGG.DISABLED
model.removeNode()
class AdoptPetDlg(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.AdoptPetDlg')
def __init__(self, doneEvent, petSeed, petNameIndex):
zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
name, dna, traitSeed = PetUtil.getPetInfoFromSeed(petSeed, zoneId)
name = PetNameGenerator.PetNameGenerator().getName(petNameIndex)
cost = PetUtil.getPetCostFromSeed(petSeed, zoneId)
model = loader.loadModel('phase_4/models/gui/AdoptPet')
modelPos = (0, 0, -0.3)
modelScale = 0.055
DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopAdoptConfirm % (name, cost), text_wordwrap=12, text_scale=0.05, text_pos=(0, 0.55), text_fg=text0Color)
self.initialiseoptions(PetshopGUI.AdoptPetDlg)
self.petView = self.attachNewNode('petView')
self.petView.setPos(-0.13, 0, 0.8)
self.petModel = Pet.Pet(forGui=1)
self.petModel.setDNA(dna)
self.petModel.fitAndCenterHead(0.395, forGui=1)
self.petModel.reparentTo(self.petView)
self.petModel.setH(130)
self.petModel.enterNeutralHappy()
self.moneyDisplay = DirectLabel(parent=self, relief=None, text=str(base.localAvatar.getTotalMoney()), text_scale=0.075, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.225, 0.33), text_font=ToontownGlobals.getSignFont())
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__moneyChange)
okImageList = (model.find('**/CheckButtonUp'), model.find('**/CheckButtonDown'), model.find('**/CheckButtonRollover'))
cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelRollover'))
cancelIcon = model.find('**/CancelIcon')
checkIcon = model.find('**/CheckIcon')
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, text=('', TTLocalizer.PetshopGoBack), text_pos=(-5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
self.okButton = DirectButton(parent=self, relief=None, image=okImageList, geom=checkIcon, scale=modelScale, text=('', TTLocalizer.PetshopAdopt), text_pos=(5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
model.removeNode()
def destroy(self):
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
self.petModel.delete()
DirectFrame.destroy(self)
def __moneyChange(self, money):
self.moneyDisplay['text'] = str(base.localAvatar.getTotalMoney())
class ReturnPetDlg(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.ReturnPetDlg')
def __init__(self, doneEvent):
def showDialog(avatar):
model = loader.loadModel('phase_4/models/gui/ReturnPet')
modelPos = (0, 0, -0.3)
modelScale = (0.055, 0.055, 0.055)
base.r = self
DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopReturnConfirm % avatar.getName(), text_wordwrap=12, text_scale=TTLocalizer.PGUIreturnConfirm, text_pos=(0, 0.45), text_fg=text2Color)
self.initialiseoptions(PetshopGUI.ReturnPetDlg)
okImageList = (model.find('**/CheckButtonUp'), model.find('**/CheckButtonDown'), model.find('**/CheckRollover'))
cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelRollover'))
cancelIcon = model.find('**/CancelIcon')
checkIcon = model.find('**/CheckIcon')
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, text=('', TTLocalizer.PetshopGoBack), text_pos=(-5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
self.okButton = DirectButton(parent=self, relief=None, image=okImageList, geom=checkIcon, scale=modelScale, text=('', TTLocalizer.PetshopReturn), text_pos=(5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
self.petView = self.attachNewNode('petView')
self.petView.setPos(-0.15, 0, 0.8)
self.petModel = Pet.Pet(forGui=1)
self.petModel.setDNA(avatar.getDNA())
self.petModel.fitAndCenterHead(0.395, forGui=1)
self.petModel.reparentTo(self.petView)
self.petModel.setH(130)
self.petModel.enterNeutralSad()
model.removeNode()
self.initialized = True
return
self.initialized = False
self.petPanel = PetDetail.PetDetail(base.localAvatar.getPetId(), showDialog)
def destroy(self):
if self.initialized:
self.petPanel.avatar.disable()
self.petPanel.avatar.delete()
self.petPanel.avatar = None
self.petPanel = None
self.petModel.delete()
DirectFrame.destroy(self)
class ChoosePetDlg(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.ChoosePetDlg')
def __init__(self, doneEvent, petSeeds):
model = loader.loadModel('phase_4/models/gui/PetShopInterface')
modelPos = (0, 0, -0.9)
modelScale = (0.185, 0.185, 0.185)
DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopChooserTitle, text_wordwrap=26, text_scale=TTLocalizer.PGUIchooserTitle, text_fg=Vec4(0.36, 0.94, 0.93, 1), text_pos=(0, 1.58))
self.initialiseoptions(PetshopGUI.ChoosePetDlg)
adoptImageList = (model.find('**/AdoptButtonUp'),
model.find('**/AdoptButtonDown'),
model.find('**/AdoptButtonRollover'),
model.find('**/AdoptButtonRollover'))
cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelButtonRollover'))
cancelIcon = model.find('**/CancelIcon')
pawLImageList = (model.find('**/Paw1Up'), model.find('**/Paw1Down'), model.find('**/Paw1Rollover'))
pawLArrowImageList = model.find('**/Arrow1')
pawRImageList = (model.find('**/Paw2Up'), model.find('**/Paw2Down'), model.find('**/Paw2Rollover'))
pawRArrowImageList = model.find('**/Arrow2')
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, pressEffect=False, command=lambda : messenger.send(doneEvent, [-1]))
self.pawLButton = DirectButton(parent=self, relief=None, image=pawLImageList, geom=pawLArrowImageList, scale=modelScale, pressEffect=False, command=lambda : self.__handlePetChange(-1))
self.pawRButton = DirectButton(parent=self, relief=None, image=pawRImageList, geom=pawRArrowImageList, scale=modelScale, pressEffect=False, command=lambda : self.__handlePetChange(1))
self.okButton = DirectButton(parent=self, relief=None, image=adoptImageList, image3_color=disabledImageColor, scale=modelScale, text=TTLocalizer.PetshopAdopt, text_scale=TTLocalizer.PGUIokButton, text_pos=TTLocalizer.PGUIokButtonPos, text0_fg=text0Color, text1_fg=text1Color, text2_fg=text2Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [self.curPet]))
self.moneyDisplay = DirectLabel(parent=self, relief=None, text=str(base.localAvatar.getTotalMoney()), text_scale=0.1, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.34, 0.12), text_font=ToontownGlobals.getSignFont())
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__moneyChange)
self.petView = self.attachNewNode('petView')
self.petView.setPos(-0.05, 0, 1.15)
model.removeNode()
self.petSeeds = petSeeds
self.makePetList()
self.showPet()
def makePetList(self):
self.numPets = len(self.petSeeds)
self.curPet = 0
self.petDNA = []
self.petName = []
self.petDesc = []
self.petCost = []
for i in xrange(self.numPets):
random.seed(self.petSeeds[i])
zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
name, dna, traitSeed = PetUtil.getPetInfoFromSeed(self.petSeeds[i], zoneId)
cost = PetUtil.getPetCostFromSeed(self.petSeeds[i], zoneId)
traits = PetTraits.PetTraits(traitSeed, zoneId)
traitList = traits.getExtremeTraitDescriptions()
numGenders = len(PetDNA.PetGenders)
gender = i % numGenders
PetDNA.setGender(dna, gender)
self.petDNA.append(dna)
self.petName.append(TTLocalizer.PetshopUnknownName)
descList = []
descList.append(TTLocalizer.PetshopDescGender % PetDNA.getGenderString(gender=gender))
if traitList:
descList.append(TTLocalizer.PetshopDescTrait % traitList[0])
else:
descList.append(TTLocalizer.PetshopDescTrait % TTLocalizer.PetshopDescStandard)
traitList.extend(['',
'',
'',
''])
for trait in traitList[1:4]:
descList.append('\t%s' % trait)
descList.append(TTLocalizer.PetshopDescCost % cost)
self.petDesc.append(string.join(descList, '\n'))
self.petCost.append(cost)
def destroy(self):
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
self.petModel.delete()
DirectFrame.destroy(self)
def __handlePetChange(self, nDir):
self.curPet = (self.curPet + nDir) % self.numPets
self.nameLabel.destroy()
self.petModel.delete()
self.descLabel.destroy()
self.showPet()
def showPet(self):
self.nameLabel = DirectLabel(parent=self, pos=(0, 0, 1.35), relief=None, text=self.petName[self.curPet], text_fg=Vec4(0.45, 0, 0.61, 1), text_pos=(0, 0), text_scale=0.08, text_shadow=(1, 1, 1, 1))
self.petModel = Pet.Pet(forGui=1)
self.petModel.setDNA(self.petDNA[self.curPet])
self.petModel.fitAndCenterHead(0.57, forGui=1)
self.petModel.reparentTo(self.petView)
self.petModel.setH(130)
self.petModel.enterNeutralHappy()
self.descLabel = DirectLabel(parent=self, pos=(-0.4, 0, 0.72), relief=None, scale=0.05, text=self.petDesc[self.curPet], text_align=TextNode.ALeft, text_wordwrap=TTLocalizer.PGUIwordwrap, text_scale=TTLocalizer.PGUIdescLabel)
if self.petCost[self.curPet] > base.localAvatar.getTotalMoney():
self.okButton['state'] = DGG.DISABLED
else:
self.okButton['state'] = DGG.NORMAL
def __moneyChange(self, money):
self.moneyDisplay['text'] = str(base.localAvatar.getTotalMoney())
def __init__(self, eventDict, petSeeds):
self.eventDict = eventDict
self.mainMenuDoneEvent = 'MainMenuGuiDone'
self.adoptPetDoneEvent = 'AdoptPetGuiDone'
self.returnPetDoneEvent = 'ReturnPetGuiDone'
self.petChooserDoneEvent = 'PetChooserGuiDone'
self.fishGuiDoneEvent = 'MyFishGuiDone'
self.namePickerDoneEvent = 'NamePickerGuiDone'
self.goHomeDlgDoneEvent = 'GoHomeDlgDone'
self.dialog = None
self.dialogStack = []
self.petSeeds = petSeeds
self.timer = ToontownTimer.ToontownTimer()
self.timer.reparentTo(aspect2d)
self.timer.posInTopRightCorner()
self.timer.countdown(PetConstants.PETCLERK_TIMER, self.__timerExpired)
self.doDialog(Dialog_MainMenu)
def __timerExpired(self):
messenger.send(self.eventDict['guiDone'], [True])
def destroy(self):
self.destroyDialog()
self.timer.destroy()
del self.timer
self.ignore(self.mainMenuDoneEvent)
self.ignore(self.adoptPetDoneEvent)
self.ignore(self.returnPetDoneEvent)
self.ignore(self.petChooserDoneEvent)
self.ignore(self.fishGuiDoneEvent)
self.ignore(self.namePickerDoneEvent)
self.ignore(self.goHomeDlgDoneEvent)
def destroyDialog(self):
if self.dialog != None:
self.dialog.destroy()
self.dialog = None
def popDialog(self):
self.dialogStack.pop()
self.doDialog(self.dialogStack.pop())
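# Note on the dialog stack (derived from the handlers below): doDialog() pushes
# each dialog id, so popDialog() discards the current entry and re-creates the
# previous one. A typical adoption flow therefore stacks
# Dialog_MainMenu -> Dialog_ChoosePet -> Dialog_NamePicker -> Dialog_AdoptPet.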
def doDialog(self, nDialog):
self.destroyDialog()
self.dialogStack.append(nDialog)
if nDialog == Dialog_MainMenu:
self.acceptOnce(self.mainMenuDoneEvent, self.__handleMainMenuDlg)
self.dialog = self.MainMenuDlg(self.mainMenuDoneEvent)
elif nDialog == Dialog_AdoptPet:
self.acceptOnce(self.adoptPetDoneEvent, self.__handleAdoptPetDlg)
self.dialog = self.AdoptPetDlg(self.adoptPetDoneEvent, self.petSeeds[self.adoptPetNum], self.adoptPetNameIndex)
elif nDialog == Dialog_ChoosePet:
self.acceptOnce(self.petChooserDoneEvent, self.__handleChoosePetDlg)
self.dialog = self.ChoosePetDlg(self.petChooserDoneEvent, self.petSeeds)
elif nDialog == Dialog_ReturnPet:
self.acceptOnce(self.returnPetDoneEvent, self.__handleReturnPetDlg)
self.dialog = self.ReturnPetDlg(self.returnPetDoneEvent)
elif nDialog == Dialog_SellFish:
self.acceptOnce(self.fishGuiDoneEvent, self.__handleFishSellDlg)
self.dialog = FishSellGUI.FishSellGUI(self.fishGuiDoneEvent)
elif nDialog == Dialog_NamePicker:
self.acceptOnce(self.namePickerDoneEvent, self.__handleNamePickerDlg)
self.dialog = self.NamePicker(self.namePickerDoneEvent, self.petSeeds[self.adoptPetNum], gender=self.adoptPetNum % 2)
elif nDialog == Dialog_GoHome:
self.acceptOnce(self.goHomeDlgDoneEvent, self.__handleGoHomeDlg)
self.dialog = self.GoHomeDlg(self.goHomeDlgDoneEvent)
def __handleMainMenuDlg(self, exitVal):
if exitVal == 0:
messenger.send(self.eventDict['guiDone'])
elif exitVal == 1:
self.doDialog(Dialog_SellFish)
elif exitVal == 2:
self.doDialog(Dialog_ChoosePet)
elif exitVal == 3:
self.doDialog(Dialog_ReturnPet)
def __handleFishSellDlg(self, exitVal):
if exitVal == 0:
self.popDialog()
elif exitVal == 1:
self.destroyDialog()
messenger.send(self.eventDict['fishSold'])
def __handleChoosePetDlg(self, exitVal):
if exitVal == -1:
self.popDialog()
else:
self.adoptPetNum = exitVal
self.doDialog(Dialog_NamePicker)
def __handleNamePickerDlg(self, exitVal):
if exitVal == -1:
self.popDialog()
else:
self.adoptPetNameIndex = exitVal
if base.localAvatar.hasPet():
self.doDialog(Dialog_ReturnPet)
else:
self.doDialog(Dialog_AdoptPet)
def __handleAdoptPetDlg(self, exitVal):
if exitVal == 0:
self.popDialog()
elif exitVal == 1:
self.destroyDialog()
messenger.send(self.eventDict['petAdopted'], [self.adoptPetNum, self.adoptPetNameIndex])
messenger.send(self.eventDict['guiDone'])
def __handleGoHomeDlg(self, exitVal):
if exitVal == 0:
messenger.send(self.eventDict['guiDone'])
elif exitVal == 1:
messenger.send(self.eventDict['guiDone'])
place = base.cr.playGame.getPlace()
if place == None:
self.notify.warning('Tried to go home, but place is None.')
return
place.goHomeNow(base.localAvatar.lastHood)
def __handleReturnPetDlg(self, exitVal):
if exitVal == 0:
self.popDialog()
elif exitVal == 1:
if self.dialogStack[len(self.dialogStack) - 2] == Dialog_NamePicker:
self.doDialog(Dialog_AdoptPet)
else:
self.destroyDialog()
messenger.send(self.eventDict['petReturned'])
messenger.send(self.eventDict['guiDone'])
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg']
filenames = ['*.ini', '*.cfg', '*.properties']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*?$', Comment),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
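# Heuristic: an INI-style file usually opens with a [section] header on its
# very first line.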
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list']
filenames = ['sources.list']
mimetypes = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
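# Lines that look like preprocessor directives or comments are emitted
# directly as insertions; everything else is buffered and lexed with
# BaseMakefileLexer, then merged back together via do_insertions().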
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
# TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]*', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]*', Generic.Deleted),
],
}
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
([^\s]+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
([^\s]+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"[^\s]+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
*New in Pygments 0.6.*
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
*New in Pygments 0.6.*
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]*', Text, 'textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(..', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
*New in Pygments 0.6.*
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'$', Text, '#pop'),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"]+', Text)
]
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
*New in Pygments 0.7.*
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'}}}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'{{{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
*New in Pygments 0.7.*
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language`` and
``.. code:: language`` directives with a lexer for the given
language (default: ``True``). *New in Pygments 0.8.*
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
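# Strip the block's leading indentation before lexing and remember it in
# `ins` so do_insertions() can splice it back in as plain Text tokens.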
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`\\]+', String),
(r'\\.', String),
(r'``', String, '#pop'),
(r'[`\\]', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line starts with - or =
text[p1+1] == text[p2-1]): # ...and ends with the same character
return 0.5
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
*New in Pygments 0.8.*
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
tokens = {
'root': [
# Who decided that doublequote was a good comment character??
(r'^\s*".*', Comment),
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(\\\\|\\'|[^\n'])*'", String.Single),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vimbuiltins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w: return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
*New in Pygments 0.9.*
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
#(r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([\w-]*:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
*New in Pygments 0.9.*
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = [ "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to",
"anonymize_headers", "append_domain", "as_whois_server",
"auth_param_basic", "authenticate_children",
"authenticate_program", "authenticate_ttl", "broken_posts",
"buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem",
"cache_mem_high", "cache_mem_low", "cache_mgr",
"cachemgr_passwd", "cache_peer", "cache_peer_access",
"cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low",
"client_db", "client_lifetime", "client_netmask",
"connect_timeout", "coredump_dir", "dead_peer_timeout",
"debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters",
"delay_pools", "deny_info", "dns_children", "dns_defnames",
"dns_nameservers", "dns_testnames", "emulate_httpd_log",
"err_html_text", "fake_user_agent", "firewall_ip",
"forwarded_for", "forward_snmpd_port", "fqdncache_size",
"ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients",
"header_access", "header_replace", "hierarchy_stoplist",
"high_response_time_warning", "high_page_fault_warning",
"htcp_port", "http_access", "http_anonymizer", "httpd_accel",
"httpd_accel_host", "httpd_accel_port",
"httpd_accel_uses_host_header", "httpd_accel_with_proxy",
"http_port", "http_reply_access", "icp_access",
"icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average",
"inside_firewall", "ipcache_high", "ipcache_low",
"ipcache_size", "local_domain", "local_ip", "logfile_rotate",
"log_fqdn", "log_icp_queries", "log_mime_hdrs",
"maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy",
"mime_table", "min_http_poll_cnt", "min_icp_poll_cnt",
"minimum_direct_hops", "minimum_object_size",
"minimum_retry_timeout", "miss_access", "negative_dns_ttl",
"negative_ttl", "neighbor_timeout", "neighbor_type_domain",
"netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache",
"passthrough_proxy", "pconn_timeout", "pid_filename",
"pinger_program", "positive_dns_ttl", "prefer_direct",
"proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"reference_age", "refresh_pattern", "reload_into_ims",
"request_body_max_size", "request_size", "request_timeout",
"shutdown_lifetime", "single_parent_bypass",
"siteselect_timeout", "snmp_access", "snmp_incoming_address",
"snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address",
"tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
"udp_hit_obj_size", "udp_incoming_address",
"udp_outgoing_address", "unique_hostname", "unlinkd_program",
"uri_whitespace", "useragent_log", "visible_hostname",
"wais_relay", "wais_relay_host", "wais_relay_port",
]
opts = [ "proxy-only", "weight", "ttl", "no-query", "default",
"round-robin", "multicast-responder", "on", "off", "all",
"deny", "allow", "via", "parent", "no-digest", "heap", "lru",
"realm", "children", "credentialsttl", "none", "disable",
"offline_toggle", "diskd", "q1", "q2",
]
actions = [ "shutdown", "info", "parameter", "server_list",
"client_list", r'squid\.conf',
]
actions_stats = [ "objects", "vm_objects", "utilization",
"ipcache", "fqdncache", "dns", "redirector", "io",
"reply_headers", "filedescriptors", "netdb",
]
actions_log = [ "status", "enable", "disable", "clear"]
acls = [ "url_regex", "urlpath_regex", "referer_regex", "port",
"proto", "req_mime_type", "rep_mime_type", "method",
"browser", "user", "src", "dst", "time", "dstdomain", "ident",
"snmp_community",
]
ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
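# Build a regex that matches any word from the given list as a whole word.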
def makelistre(list):
return r'\b(?:'+'|'.join(list)+r')\b'
tokens = {
'root': [
(r'\s+', Text),
(r'#', Comment, 'comment'),
(makelistre(keywords), Keyword),
(makelistre(opts), Name.Constant),
# Actions
(makelistre(actions), String),
(r'stats/'+makelistre(actions), String),
(r'log/'+makelistre(actions)+r'=', String),
(makelistre(acls), Keyword),
(ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
(r'\b\d+\b', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.*', Comment, '#pop'),
],
}
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
*New in Pygments 0.9.*
"""
name = 'Debian Control file'
aliases = ['control']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
(r'^((Build-)?Depends)', Keyword, 'depends'),
(r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>', Generic.Strong),
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r',\n?', Text),
(r'.', Text),
],
'description': [
(r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
('', Text, '#pop'),
],
'depends': [
(r':\s*', Text),
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
(r'\(', Text, 'depend_vers'),
(r',', Text),
(r'\|', Operator),
(r'[\s]+', Text),
(r'[}\)]\s*$', Text, '#pop'),
(r'[}]', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
(r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\),', Text, '#pop'),
(r'\)[^,]', Text, '#pop:2'),
(r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
]
}
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
*New in Pygments 0.11.*
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
# the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
# a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
#(r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
*New in Pygments 1.2.*
"""
name = 'CMake'
aliases = ['cmake']
filenames = ['*.cmake']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
#(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
(r'\n', Text), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Text),
(r'#.+\n', Comment),
]
}
|
|
import os
import shutil
import subprocess
import tempfile
import traceback
import zipfile
import json
import resource
from celery import task
from django.conf import settings
from django.utils.timezone import now
import apptools.addr2lines
from ide.utils.sdk import generate_wscript_file, generate_jshint_file, generate_manifest_dict, \
generate_simplyjs_manifest_dict, generate_pebblejs_manifest_dict
from utils.keen_helper import send_keen_event
from ide.models.build import BuildResult, BuildSize
from ide.models.files import SourceFile, ResourceFile, ResourceVariant
from ide.utils.prepreprocessor import process_file as check_preprocessor_directives
__author__ = 'katharine'
def _set_resource_limits():
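# Passed as preexec_fn to subprocess.check_output() below, so these limits
# apply to the spawned SDK build process rather than the celery worker.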
resource.setrlimit(resource.RLIMIT_CPU, (20, 20)) # 20 seconds of CPU time
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100)) # 100 open files
resource.setrlimit(resource.RLIMIT_RSS, (20 * 1024 * 1024, 20 * 1024 * 1024)) # 20 MB of memory
resource.setrlimit(resource.RLIMIT_FSIZE, (5 * 1024 * 1024, 5 * 1024 * 1024)) # 5 MB output files.
def create_source_files(project, base_dir):
"""
:param project: Project
"""
source_files = project.source_files.all()
src_dir = os.path.join(base_dir, 'src')
if project.project_type == 'pebblejs':
src_dir = os.path.join(src_dir, 'js')
worker_dir = None
try:
os.mkdir(src_dir)
except OSError as e:
if e.errno == 17: # file exists
pass
else:
raise
for f in source_files:
target_dir = src_dir
if f.target == 'worker' and project.project_type == 'native':
if worker_dir is None:
worker_dir = os.path.join(base_dir, 'worker_src')
os.mkdir(worker_dir)
target_dir = worker_dir
abs_target = os.path.abspath(os.path.join(target_dir, f.file_name))
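# Guard against path traversal: the resolved target must stay inside the
# intended source directory.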
if not abs_target.startswith(target_dir):
raise Exception("Suspicious filename: %s" % f.file_name)
abs_target_dir = os.path.dirname(abs_target)
if not os.path.exists(abs_target_dir):
os.makedirs(abs_target_dir)
f.copy_to_path(abs_target)
# Make sure we don't duplicate downloading effort; just open the one we created.
with open(abs_target) as fh:
check_preprocessor_directives(abs_target_dir, abs_target, fh.read())
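# Extract line-number debug information from the given ELF file (if it was
# produced) and attach it to the build result for the given platform/kind.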
def save_debug_info(base_dir, build_result, kind, platform, elf_file):
path = os.path.join(base_dir, 'build', elf_file)
if os.path.exists(path):
try:
debug_info = apptools.addr2lines.create_coalesced_group(path)
except:
print traceback.format_exc()
else:
build_result.save_debug_info(debug_info, platform, kind)
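# Record binary, resource and (optional) worker sizes for one platform by
# inspecting the built archive; platforms missing from the zip are skipped.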
def store_size_info(build_result, platform, zip):
platform_dir = platform + '/'
if platform == 'aplite':
platform_dir = ''
try:
build_size = BuildSize.objects.create(
build=build_result,
binary_size=zip.getinfo(platform_dir + 'pebble-app.bin').file_size,
resource_size=zip.getinfo(platform_dir + 'app_resources.pbpack').file_size,
platform=platform,
)
try:
build_size.worker_size = zip.getinfo(platform_dir + 'pebble-worker.bin').file_size
except KeyError:
pass
build_size.save()
except KeyError:
pass
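# Celery task: assemble the project sources and resources into a temporary
# directory, run the appropriate Pebble SDK build tool, then store sizes,
# debug info, the build log and the resulting .pbw on the BuildResult.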
@task(ignore_result=True, acks_late=True)
def run_compile(build_result):
build_result = BuildResult.objects.get(pk=build_result)
project = build_result.project
source_files = SourceFile.objects.filter(project=project)
resources = ResourceFile.objects.filter(project=project)
# Assemble the project somewhere
base_dir = tempfile.mkdtemp(dir=os.path.join(settings.CHROOT_ROOT, 'tmp') if settings.CHROOT_ROOT else None)
try:
# Resources
resource_root = 'resources'
os.makedirs(os.path.join(base_dir, resource_root, 'images'))
os.makedirs(os.path.join(base_dir, resource_root, 'fonts'))
os.makedirs(os.path.join(base_dir, resource_root, 'data'))
if project.project_type == 'native':
# Source code
create_source_files(project, base_dir)
manifest_dict = generate_manifest_dict(project, resources)
open(os.path.join(base_dir, 'appinfo.json'), 'w').write(json.dumps(manifest_dict))
for f in resources:
target_dir = os.path.abspath(os.path.join(base_dir, resource_root, ResourceFile.DIR_MAP[f.kind]))
abs_target = os.path.abspath(os.path.join(target_dir, f.file_name))
f.copy_all_variants_to_dir(target_dir)
# Reconstitute the SDK
open(os.path.join(base_dir, 'wscript'), 'w').write(generate_wscript_file(project))
open(os.path.join(base_dir, 'pebble-jshintrc'), 'w').write(generate_jshint_file(project))
elif project.project_type == 'simplyjs':
shutil.rmtree(base_dir)
shutil.copytree(settings.SIMPLYJS_ROOT, base_dir)
manifest_dict = generate_simplyjs_manifest_dict(project)
js = '\n\n'.join(x.get_contents() for x in source_files if x.file_name.endswith('.js'))
escaped_js = json.dumps(js)
build_result.save_simplyjs(js)
open(os.path.join(base_dir, 'appinfo.json'), 'w').write(json.dumps(manifest_dict))
open(os.path.join(base_dir, 'src', 'js', 'zzz_userscript.js'), 'w').write("""
(function() {
simply.mainScriptSource = %s;
})();
""" % escaped_js)
elif project.project_type == 'pebblejs':
shutil.rmtree(base_dir)
shutil.copytree(settings.PEBBLEJS_ROOT, base_dir)
manifest_dict = generate_pebblejs_manifest_dict(project, resources)
create_source_files(project, base_dir)
for f in resources:
if f.kind != 'png':
continue
target_dir = os.path.abspath(os.path.join(base_dir, resource_root, ResourceFile.DIR_MAP[f.kind]))
abs_target = os.path.abspath(os.path.join(target_dir, f.file_name))
if not abs_target.startswith(target_dir):
raise Exception("Suspicious filename: %s" % f.file_name)
f.copy_to_path(ResourceVariant.VARIANT_DEFAULT, abs_target)
open(os.path.join(base_dir, 'appinfo.json'), 'w').write(json.dumps(manifest_dict))
# Build the thing
cwd = os.getcwd()
success = False
output = 'Failed to get output'
build_start_time = now()
try:
os.chdir(base_dir)
if project.sdk_version == '2':
tool = settings.SDK2_PEBBLE_TOOL
elif project.sdk_version == '3':
tool = settings.SDK3_PEBBLE_TOOL
else:
raise Exception("invalid sdk version.")
output = subprocess.check_output([tool, "build"], stderr=subprocess.STDOUT, preexec_fn=_set_resource_limits)
except subprocess.CalledProcessError as e:
output = e.output
print output
success = False
except Exception as e:
success = False
output = str(e)
else:
success = True
temp_file = os.path.join(base_dir, 'build', '%s.pbw' % os.path.basename(base_dir))
if not os.path.exists(temp_file):
success = False
print "Success was a lie."
finally:
build_end_time = now()
os.chdir(cwd)
if success:
# Try reading file sizes out of it first.
try:
s = os.stat(temp_file)
build_result.total_size = s.st_size
# Now peek into the zip to see the component parts
with zipfile.ZipFile(temp_file, 'r') as z:
store_size_info(build_result, 'aplite', z)
store_size_info(build_result, 'basalt', z)
except Exception as e:
print "Couldn't extract filesizes: %s" % e
# Try pulling out debug information.
if project.sdk_version == '2':
save_debug_info(base_dir, build_result, BuildResult.DEBUG_APP, 'aplite', os.path.join(base_dir, 'build', 'pebble-app.elf'))
save_debug_info(base_dir, build_result, BuildResult.DEBUG_WORKER, 'aplite', os.path.join(base_dir, 'build', 'pebble-worker.elf'))
else:
save_debug_info(base_dir, build_result, BuildResult.DEBUG_APP, 'aplite', os.path.join(base_dir, 'build', 'aplite/pebble-app.elf'))
save_debug_info(base_dir, build_result, BuildResult.DEBUG_WORKER, 'aplite', os.path.join(base_dir, 'build', 'aplite/pebble-worker.elf'))
save_debug_info(base_dir, build_result, BuildResult.DEBUG_APP, 'basalt', os.path.join(base_dir, 'build', 'basalt/pebble-app.elf'))
save_debug_info(base_dir, build_result, BuildResult.DEBUG_WORKER, 'basalt', os.path.join(base_dir, 'build', 'basalt/pebble-worker.elf'))
build_result.save_pbw(temp_file)
build_result.save_build_log(output)
build_result.state = BuildResult.STATE_SUCCEEDED if success else BuildResult.STATE_FAILED
build_result.finished = now()
build_result.save()
data = {
'data': {
'cloudpebble': {
'build_id': build_result.id,
'job_run_time': (build_result.finished - build_result.started).total_seconds(),
},
'build_time': (build_end_time - build_start_time).total_seconds(),
}
}
event_name = 'app_build_succeeded' if success else 'app_build_failed'
send_keen_event(['cloudpebble', 'sdk'], event_name, data, project=project)
except Exception as e:
print "Build failed due to internal error: %s" % e
traceback.print_exc()
build_result.state = BuildResult.STATE_FAILED
build_result.finished = now()
try:
build_result.save_build_log("Something broke:\n%s" % e)
except:
pass
build_result.save()
finally:
# shutil.rmtree(base_dir)
print base_dir
|
|
"""Support for Netgear LTE modems."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import attr
import eternalegypt
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from . import sensor_types
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISPATCHER_NETGEAR_LTE = "netgear_lte_update"
DOMAIN = "netgear_lte"
DATA_KEY = "netgear_lte"
EVENT_SMS = "netgear_lte_sms"
SERVICE_DELETE_SMS = "delete_sms"
SERVICE_SET_OPTION = "set_option"
SERVICE_CONNECT_LTE = "connect_lte"
SERVICE_DISCONNECT_LTE = "disconnect_lte"
ATTR_HOST = "host"
ATTR_SMS_ID = "sms_id"
ATTR_FROM = "from"
ATTR_MESSAGE = "message"
ATTR_FAILOVER = "failover"
ATTR_AUTOCONNECT = "autoconnect"
FAILOVER_MODES = ["auto", "wire", "mobile"]
AUTOCONNECT_MODES = ["never", "home", "always"]
NOTIFY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
vol.Optional(CONF_RECIPIENT, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_SENSORS)])
}
)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_BINARY_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_BINARY_SENSORS)])
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN, default={}): vol.All(
cv.ensure_list, [NOTIFY_SCHEMA]
),
vol.Optional(SENSOR_DOMAIN, default={}): SENSOR_SCHEMA,
vol.Optional(
BINARY_SENSOR_DOMAIN, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
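# Example configuration.yaml entry accepted by CONFIG_SCHEMA (illustrative
# host and recipient values; sensor/binary_sensor keys fall back to their
# defaults when omitted):
#
# netgear_lte:
#   - host: 192.168.5.1
#     password: !secret lte_password
#     notify:
#       - name: sms
#         recipient: "+15555550123"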
DELETE_SMS_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_HOST): cv.string,
vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]),
}
)
SET_OPTION_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_FAILOVER, ATTR_AUTOCONNECT),
{
vol.Optional(ATTR_HOST): cv.string,
vol.Optional(ATTR_FAILOVER): vol.In(FAILOVER_MODES),
vol.Optional(ATTR_AUTOCONNECT): vol.In(AUTOCONNECT_MODES),
},
)
)
CONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
DISCONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
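# Example service call matching DELETE_SMS_SCHEMA (illustrative values):
#   service: netgear_lte.delete_sms
#   data:
#     host: 192.168.5.1
#     sms_id: [3, 4]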
@attr.s
class ModemData:
"""Class for modem state."""
hass = attr.ib()
host = attr.ib()
modem = attr.ib()
data = attr.ib(init=False, default=None)
connected = attr.ib(init=False, default=True)
async def async_update(self):
"""Call the API to update the data."""
try:
self.data = await self.modem.information()
if not self.connected:
_LOGGER.warning("Connected to %s", self.host)
self.connected = True
except eternalegypt.Error:
if self.connected:
_LOGGER.warning("Lost connection to %s", self.host)
self.connected = False
self.data = None
async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE)
@attr.s
class LTEData:
"""Shared state."""
websession = attr.ib()
modem_data = attr.ib(init=False, factory=dict)
def get_modem_data(self, config):
"""Get modem_data for the host in config."""
if config[CONF_HOST] is not None:
return self.modem_data.get(config[CONF_HOST])
if len(self.modem_data) != 1:
return None
return next(iter(self.modem_data.values()))
async def async_setup(hass, config):
"""Set up Netgear LTE component."""
if DATA_KEY not in hass.data:
websession = async_create_clientsession(
hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
)
hass.data[DATA_KEY] = LTEData(websession)
async def service_handler(service):
"""Apply a service."""
host = service.data.get(ATTR_HOST)
conf = {CONF_HOST: host}
modem_data = hass.data[DATA_KEY].get_modem_data(conf)
if not modem_data:
_LOGGER.error("%s: host %s unavailable", service.service, host)
return
if service.service == SERVICE_DELETE_SMS:
for sms_id in service.data[ATTR_SMS_ID]:
await modem_data.modem.delete_sms(sms_id)
elif service.service == SERVICE_SET_OPTION:
failover = service.data.get(ATTR_FAILOVER)
if failover:
await modem_data.modem.set_failover_mode(failover)
autoconnect = service.data.get(ATTR_AUTOCONNECT)
if autoconnect:
await modem_data.modem.set_autoconnect_mode(autoconnect)
elif service.service == SERVICE_CONNECT_LTE:
await modem_data.modem.connect_lte()
elif service.service == SERVICE_DISCONNECT_LTE:
await modem_data.modem.disconnect_lte()
service_schemas = {
SERVICE_DELETE_SMS: DELETE_SMS_SCHEMA,
SERVICE_SET_OPTION: SET_OPTION_SCHEMA,
SERVICE_CONNECT_LTE: CONNECT_LTE_SCHEMA,
SERVICE_DISCONNECT_LTE: DISCONNECT_LTE_SCHEMA,
}
for service, schema in service_schemas.items():
hass.services.async_register(
DOMAIN, service, service_handler, schema=schema
)
netgear_lte_config = config[DOMAIN]
# Set up each modem
tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config]
await asyncio.wait(tasks)
# Load platforms for each modem
for lte_conf in netgear_lte_config:
# Notify
for notify_conf in lte_conf[NOTIFY_DOMAIN]:
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
CONF_NAME: notify_conf.get(CONF_NAME),
NOTIFY_DOMAIN: notify_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config
)
)
# Sensor
sensor_conf = lte_conf.get(SENSOR_DOMAIN)
discovery_info = {CONF_HOST: lte_conf[CONF_HOST], SENSOR_DOMAIN: sensor_conf}
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
# Binary Sensor
binary_sensor_conf = lte_conf.get(BINARY_SENSOR_DOMAIN)
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
BINARY_SENSOR_DOMAIN: binary_sensor_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, BINARY_SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
return True
async def _setup_lte(hass, lte_config):
"""Set up a Netgear LTE modem."""
host = lte_config[CONF_HOST]
password = lte_config[CONF_PASSWORD]
websession = hass.data[DATA_KEY].websession
modem = eternalegypt.Modem(hostname=host, websession=websession)
modem_data = ModemData(hass, host, modem)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password))
@callback
def cleanup_retry(event):
"""Clean up retry task resources."""
if not retry_task.done():
retry_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
"""Log in and complete setup."""
await modem_data.modem.login(password=password)
def fire_sms_event(sms):
"""Send an SMS event."""
data = {
ATTR_HOST: modem_data.host,
ATTR_SMS_ID: sms.id,
ATTR_FROM: sms.sender,
ATTR_MESSAGE: sms.message,
}
hass.bus.async_fire(EVENT_SMS, data)
await modem_data.modem.add_sms_listener(fire_sms_event)
await modem_data.async_update()
hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
async def _update(now):
"""Periodic update."""
await modem_data.async_update()
update_unsub = async_track_time_interval(hass, _update, SCAN_INTERVAL)
async def cleanup(event):
"""Clean up resources."""
update_unsub()
await modem_data.modem.logout()
del hass.data[DATA_KEY].modem_data[modem_data.host]
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _retry_login(hass, modem_data, password):
"""Sleep and retry setup."""
_LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host)
modem_data.connected = False
delay = 15
while not modem_data.connected:
await asyncio.sleep(delay)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
delay = min(2 * delay, 300)
@attr.s
class LTEEntity(Entity):
"""Base LTE entity."""
modem_data = attr.ib()
sensor_type = attr.ib()
_unique_id = attr.ib(init=False)
@_unique_id.default
def _init_unique_id(self):
"""Register unique_id while we know data is valid."""
return f"{self.sensor_type}_{self.modem_data.data.serial_number}"
async def async_added_to_hass(self):
"""Register callback."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCHER_NETGEAR_LTE, self.async_write_ha_state
)
)
async def async_update(self):
"""Force update of state."""
await self.modem_data.async_update()
@property
def should_poll(self):
"""Return that the sensor should not be polled."""
return False
@property
def available(self):
"""Return the availability of the sensor."""
return self.modem_data.data is not None
@property
def unique_id(self):
"""Return a unique ID like 'usage_5TG365AB0078V'."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"Netgear LTE {self.sensor_type}"
|
|
# coding: utf-8
import logging
import requests
from .config import Config
from .utils import is_valid_email, is_valid_cpf, is_valid_cnpj
from .parsers import (PagSeguroNotificationResponse,
PagSeguroPreApprovalNotificationResponse,
PagSeguroPreApprovalCancel,
PagSeguroCheckoutSession,
PagSeguroPreApprovalPayment,
PagSeguroCheckoutResponse,
PagSeguroTransactionSearchResult,
PagSeguroPreApproval,
PagSeguroPreApprovalSearch)
logger = logging.getLogger()
class PagSeguro(object):
""" Pag Seguro V2 wrapper """
PAC = 1
SEDEX = 2
NONE = 3
def __init__(self, email, token, data=None, config=None):
config = config or {}
        if not isinstance(config, dict):
            raise Exception('Malformed config dict param')
self.config = Config(**config)
self.data = {}
self.data['email'] = email
self.data['token'] = token
if data and isinstance(data, dict):
self.data.update(data)
self.items = []
self.sender = {}
self.shipping = {}
self._reference = ""
self.extra_amount = None
self.redirect_url = None
self.notification_url = None
self.abandon_url = None
self.credit_card = {}
self.pre_approval = {}
self.checkout_session = None
self.payment = {}
def build_checkout_params(self, **kwargs):
""" build a dict with params """
params = kwargs or {}
if self.sender:
params['senderName'] = self.sender.get('name')
params['senderAreaCode'] = self.sender.get('area_code')
params['senderPhone'] = self.sender.get('phone')
params['senderEmail'] = is_valid_email(self.sender.get('email'))
params['senderCPF'] = is_valid_cpf(self.sender.get('cpf'))
params['senderCNPJ'] = is_valid_cnpj(self.sender.get('cnpj'))
params['senderBornDate'] = self.sender.get('born_date')
params['senderHash'] = self.sender.get('hash')
if self.config.USE_SHIPPING:
if self.shipping:
params['shippingType'] = self.shipping.get('type')
params['shippingAddressStreet'] = self.shipping.get('street')
params['shippingAddressNumber'] = self.shipping.get('number')
params['shippingAddressComplement'] = self.shipping.get(
'complement')
params['shippingAddressDistrict'] = self.shipping.get(
'district')
params['shippingAddressPostalCode'] = self.shipping.get(
'postal_code')
params['shippingAddressCity'] = self.shipping.get('city')
params['shippingAddressState'] = self.shipping.get('state')
params['shippingAddressCountry'] = self.shipping.get('country',
'BRA')
if self.shipping.get('cost'):
params['shippingCost'] = self.shipping.get('cost')
else:
params['shippingAddressRequired'] = 'false'
if self.extra_amount:
params['extraAmount'] = self.extra_amount
params['reference'] = self.reference
params['receiverEmail'] = self.data['email']
if self.redirect_url:
params['redirectURL'] = self.redirect_url
if self.notification_url:
params['notificationURL'] = self.notification_url
if self.abandon_url:
params['abandonURL'] = self.abandon_url
for i, item in enumerate(self.items, 1):
params['itemId%s' % i] = item.get('id')
params['itemDescription%s' % i] = item.get('description')
params['itemAmount%s' % i] = item.get('amount')
params['itemQuantity%s' % i] = item.get('quantity')
params['itemWeight%s' % i] = item.get('weight')
params['itemShippingCost%s' % i] = item.get('shipping_cost')
if self.payment:
params['paymentMethod'] = self.payment.get('method')
params['paymentMode'] = self.payment.get('mode')
if self.credit_card:
params['billingAddressCountry'] = 'BRA'
credit_card_keys_map = [
('creditCardToken', 'credit_card_token'),
('installmentQuantity', 'installment_quantity'),
('installmentValue', 'installment_value'),
('noInterestInstallmentQuantity',
'no_interest_installment_quantity'),
('creditCardHolderName', 'card_holder_name'),
('creditCardHolderCPF', 'card_holder_cpf'),
('creditCardHolderBirthDate', 'card_holder_birth_date'),
('creditCardHolderAreaCode', 'card_holder_area_code'),
('creditCardHolderPhone', 'card_holder_phone'),
('billingAddressStreet', 'billing_address_street'),
('billingAddressNumber', 'billing_address_number'),
('billingAddressComplement', 'billing_address_complement'),
('billingAddressDistrict', 'billing_address_district'),
('billingAddressPostalCode', 'billing_address_postal_code'),
('billingAddressCity', 'billing_address_city'),
('billingAddressState', 'billing_address_state'),
]
for key_to_set, key_to_get in credit_card_keys_map:
params[key_to_set] = self.credit_card.get(key_to_get)
if self.pre_approval:
params['preApprovalCharge'] = self.pre_approval.get('charge')
params['preApprovalName'] = self.pre_approval.get('name')
params['preApprovalDetails'] = self.pre_approval.get('details')
params['preApprovalAmountPerPayment'] = self.pre_approval.get(
'amount_per_payment')
params['preApprovalMaxAmountPerPayment'] = self.pre_approval.get(
'max_amount_per_payment')
params['preApprovalPeriod'] = self.pre_approval.get('period')
params['preApprovalMaxPaymentsPerPeriod'] = self.pre_approval.get(
'max_payments_per_period')
params['preApprovalMaxAmountPerPeriod'] = self.pre_approval.get(
'max_amount_per_period')
params['preApprovalInitialDate'] = self.pre_approval.get(
'initial_date')
params['preApprovalFinalDate'] = self.pre_approval.get(
'final_date')
params['preApprovalMaxTotalAmount'] = self.pre_approval.get(
'max_total_amount')
self.data.update(params)
self.clean_none_params()
def build_pre_approval_payment_params(self, **kwargs):
""" build a dict with params """
params = kwargs or {}
params['reference'] = self.reference
params['preApprovalCode'] = self.code
for i, item in enumerate(self.items, 1):
params['itemId%s' % i] = item.get('id')
params['itemDescription%s' % i] = item.get('description')
params['itemAmount%s' % i] = item.get('amount')
params['itemQuantity%s' % i] = item.get('quantity')
params['itemWeight%s' % i] = item.get('weight')
params['itemShippingCost%s' % i] = item.get('shipping_cost')
self.data.update(params)
self.clean_none_params()
    def clean_none_params(self):
        """ drop params whose value is None/empty, keeping booleans """
        self.data = \
            {k: v for k, v in self.data.items() if v or isinstance(v, bool)}
@property
def reference_prefix(self):
return self.config.REFERENCE_PREFIX or "%s"
@reference_prefix.setter
def reference_prefix(self, value):
self.config.REFERENCE_PREFIX = (value or "") + "%s"
@property
def reference(self):
return self.reference_prefix % self._reference
@reference.setter
def reference(self, value):
if not isinstance(value, str):
value = str(value)
if value.startswith(self.reference_prefix):
value = value[len(self.reference_prefix):]
self._reference = value
def get(self, url):
""" do a get transaction """
return requests.get(url, params=self.data, headers=self.config.HEADERS)
def post(self, url):
""" do a post request """
return requests.post(url, data=self.data, headers=self.config.HEADERS)
def checkout(self, transparent=False, **kwargs):
""" create a pagseguro checkout """
self.data['currency'] = self.config.CURRENCY
self.build_checkout_params(**kwargs)
if transparent:
response = self.post(url=self.config.TRANSPARENT_CHECKOUT_URL)
else:
response = self.post(url=self.config.CHECKOUT_URL)
return PagSeguroCheckoutResponse(response.content, config=self.config)
def transparent_checkout_session(self):
response = self.post(url=self.config.SESSION_CHECKOUT_URL)
return PagSeguroCheckoutSession(response.content,
config=self.config).session_id
def check_notification(self, code):
""" check a notification by its code """
response = self.get(url=self.config.NOTIFICATION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config)
def check_pre_approval_notification(self, code):
""" check a notification by its code """
response = self.get(
url=self.config.PRE_APPROVAL_NOTIFICATION_URL % code)
return PagSeguroPreApprovalNotificationResponse(
response.content, self.config)
def pre_approval_ask_payment(self, **kwargs):
""" ask form a subscribe payment """
self.build_pre_approval_payment_params(**kwargs)
response = self.post(url=self.config.PRE_APPROVAL_PAYMENT_URL)
return PagSeguroPreApprovalPayment(response.content, self.config)
def pre_approval_cancel(self, code):
""" cancel a subscribe """
response = self.get(url=self.config.PRE_APPROVAL_CANCEL_URL % code)
return PagSeguroPreApprovalCancel(response.content, self.config)
def check_transaction(self, code):
""" check a transaction by its code """
response = self.get(url=self.config.TRANSACTION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config)
def query_transactions(self, initial_date, final_date,
page=None,
max_results=None):
""" query transaction by date range """
last_page = False
results = []
while last_page is False:
search_result = self._consume_query_transactions(
initial_date, final_date, page, max_results)
results.extend(search_result.transactions)
if search_result.current_page is None or \
search_result.total_pages is None or \
search_result.current_page == search_result.total_pages:
last_page = True
else:
page = search_result.current_page + 1
return results
def _consume_query_transactions(self, initial_date, final_date,
page=None,
max_results=None):
querystring = {
'initialDate': initial_date.strftime('%Y-%m-%dT%H:%M'),
'finalDate': final_date.strftime('%Y-%m-%dT%H:%M'),
'page': page,
'maxPageResults': max_results,
}
self.data.update(querystring)
self.clean_none_params()
response = self.get(url=self.config.QUERY_TRANSACTION_URL)
return PagSeguroTransactionSearchResult(response.content, self.config)
def query_pre_approvals(self, initial_date, final_date, page=None,
max_results=None):
""" query pre-approvals by date range """
last_page = False
results = []
while last_page is False:
search_result = self._consume_query_pre_approvals(
initial_date, final_date, page, max_results)
results.extend(search_result.pre_approvals)
if search_result.current_page is None or \
search_result.total_pages is None or \
search_result.current_page == search_result.total_pages:
last_page = True
else:
page = search_result.current_page + 1
return results
def _consume_query_pre_approvals(self, initial_date, final_date, page=None,
max_results=None):
querystring = {
'initialDate': initial_date.strftime('%Y-%m-%dT%H:%M'),
'finalDate': final_date.strftime('%Y-%m-%dT%H:%M'),
'page': page,
'maxPageResults': max_results,
}
self.data.update(querystring)
self.clean_none_params()
response = self.get(url=self.config.QUERY_PRE_APPROVAL_URL)
return PagSeguroPreApprovalSearch(response.content, self.config)
def query_pre_approvals_by_code(self, code):
""" query pre-approvals by code """
result = self._consume_query_pre_approvals_by_code(code)
return result
def _consume_query_pre_approvals_by_code(self, code):
response = self.get(
url='%s/%s' % (self.config.QUERY_PRE_APPROVAL_URL, code)
)
return PagSeguroPreApproval(response.content, self.config)
def add_item(self, **kwargs):
self.items.append(kwargs)
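# --- Hedged usage sketch (not part of the library) ---------------------------
# Shows one way the wrapper above might be driven. The e-mail, token, URL and
# item values are placeholders, and checkout() performs a real HTTP request to
# the PagSeguro API, so this block is illustrative only.
if __name__ == "__main__":  # pragma: no cover
    pg = PagSeguro(email="seller@example.com", token="SELLER_TOKEN")
    pg.reference = "order-42"
    pg.redirect_url = "https://example.com/return"
    pg.sender = {"name": "John Doe", "email": "buyer@example.com"}
    pg.add_item(id="0001", description="Sample item",
                amount="10.00", quantity=1)
    response = pg.checkout()
    # response is a PagSeguroCheckoutResponse built from the API reply; the
    # exact attributes available (checkout code, payment URL, ...) depend on
    # the parser implementation in .parsers.
    print(response)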
|
|
#!/usr/bin/env python
# coding=utf-8
import os
import sys
import subprocess
import urllib
import zipfile
import platform
import shlex
import time
import json
import datetime
# =======================================================================================================================
# Project paths
# =======================================================================================================================
# COMPILER_VERSION = '20161024'
COMPILER_VERSION = '20180204'
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
CONTRIB_PATH = os.path.join(PROJECT_PATH, 'contrib')
COMPILER_PATH = os.path.join(CONTRIB_PATH, 'compiler', 'closure-compiler-v%s.jar' % COMPILER_VERSION)
SRC_PATH = os.path.join(PROJECT_PATH, 'src')
OUT_PATH = os.path.join(PROJECT_PATH, 'out')
CLOSURE_LIBRARY_PATH = os.path.join(CONTRIB_PATH, 'closure-library')
CLOSURE_SOURCE_PATH = os.path.join(CLOSURE_LIBRARY_PATH, 'closure', 'goog')
CLOSURE_LINTER_WRAPPER_PATH = os.path.join(CONTRIB_PATH, 'closure-linter-wrapper')
CLOSURE_BIN_PATH = os.path.join(CLOSURE_LIBRARY_PATH, 'closure', 'bin')
DEPS_WRITER_PATH = os.path.join(CLOSURE_BIN_PATH, 'build', 'depswriter.py')
PYTHON = 'python'
# =======================================================================================================================
# Synchronize contributions.
# =======================================================================================================================
def __has_closure_library():
return os.path.exists(CLOSURE_LIBRARY_PATH)
def __has_closure_compiler():
return os.path.exists(COMPILER_PATH)
def __has_closure_linter_wrapper():
return os.path.exists(CLOSURE_LINTER_WRAPPER_PATH)
def __has_closure_linter():
has_lint = True
try:
subprocess.Popen(['gjslint'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except StandardError:
has_lint = False
return has_lint
def __ensure_dir_exists(path):
if not os.path.exists(path):
os.mkdir(path)
def __need_sync_contrib():
return not __has_closure_library() \
or not __has_closure_compiler() \
or not __has_closure_linter_wrapper() \
or not __has_closure_linter()
def __sync_contrib():
t = time.time()
__ensure_dir_exists(CONTRIB_PATH)
subprocess.call(['git', 'submodule', 'init'])
subprocess.call(['git', 'submodule', 'update'])
# Download closure compiler
if not os.path.exists(COMPILER_PATH):
print 'Downloading Google Closure Compiler v.' + COMPILER_VERSION
try:
__download_and_unzip_from_http(
"http://dl.google.com/closure-compiler/compiler-%s.zip" % COMPILER_VERSION,
'compiler'
)
except StandardError as e:
print e
print 'Failed'
return False
# Install closure linter
if not __has_closure_linter():
if not __install_closure_linter():
return False
print 'Environment ready. Time spent: {:.3f}s\n'.format(time.time() - t)
return True
def __download_and_unzip_from_http(from_url, dir_name):
z_obj_path = os.path.join(CONTRIB_PATH, dir_name + '.zip')
# download zip archive from url
if not os.path.exists(z_obj_path):
urllib.urlretrieve(
from_url,
z_obj_path
)
# extract zip archive
target_path = os.path.join(CONTRIB_PATH, dir_name)
__ensure_dir_exists(target_path)
z_obj = zipfile.ZipFile(z_obj_path)
z_obj.extractall(path=target_path)
z_obj.close()
# remove archive file
os.remove(z_obj_path)
return True
def __install_closure_linter():
print 'Installing Google Closure Linter v.2.3.9'
commands = [] if platform.system() == 'Windows' else ['sudo']
commands.append('easy_install')
commands.append('https://closure-linter.googlecode.com/files/closure_linter-2.3.9.tar.gz')
try:
subprocess.call(commands)
except StandardError:
print 'Failed: you should install easy_install module for python first'
return False
print 'Success'
return True
def sync_required(func):
def wrapper():
if __need_sync_contrib():
__sync_contrib()
return func()
return wrapper
# =======================================================================================================================
# Build project
# =======================================================================================================================
def __get_version():
    f = open(os.path.join(PROJECT_PATH, 'package.json'))
    package_json = json.loads(f.read())
f.close()
return package_json['version']
def __get_file_overview():
return "/**\n * GraphicsJS is a lightweight JavaScript graphics library with an intuitive API, based on SVG/VML technology.\n * Version: %s (%s)\n * License: BSD 3-clause\n * Copyright: AnyChart.com %s. All rights reserved.\n */\n" % (__get_version(), datetime.datetime.now().strftime("%Y-%m-%d"), str(datetime.datetime.now().year))
def __getNotOptimizedCompilerArgs():
compilerArgs = [
'--compilation_level WHITESPACE_ONLY',
'--formatting PRETTY_PRINT'
]
return compilerArgs
def __getOptimizedCompilerArgs():
compilerArgs = [
'--charset UTF-8',
'--compilation_level ADVANCED_OPTIMIZATIONS',
'--process_closure_primitives',
'--language_in ECMASCRIPT3',
'--language_out ECMASCRIPT3',
'--hide_warnings_for "contrib/closure-library"',
'--assume_function_wrapper',
'--use_types_for_optimization true',
'--output_wrapper "' + __get_file_overview() + '(function(){%output%})();"',
'--env BROWSER',
'--extra_annotation_name "includeDoc"',
'--extra_annotation_name "illustration"',
'--extra_annotation_name "illustrationDesc"',
'--extra_annotation_name "ignoreDoc"',
'--extra_annotation_name "propertyDoc"',
'--extra_annotation_name "shortDescription"',
'--warning_level VERBOSE',
'--jscomp_warning accessControls',
'--jscomp_warning ambiguousFunctionDecl',
'--jscomp_warning checkDebuggerStatement',
'--jscomp_warning checkEventfulObjectDisposal',
'--jscomp_warning checkRegExp',
'--jscomp_warning checkTypes',
'--jscomp_warning checkVars',
'--jscomp_warning closureDepMethodUsageChecks',
'--jscomp_warning conformanceViolations',
'--jscomp_warning const',
'--jscomp_warning constantProperty',
'--jscomp_warning deprecated',
'--jscomp_warning deprecatedAnnotations',
'--jscomp_warning duplicate',
'--jscomp_warning duplicateMessage',
'--jscomp_warning es3',
'--jscomp_warning es5Strict',
'--jscomp_warning externsValidation',
'--jscomp_off extraRequire',
'--jscomp_warning fileoverviewTags',
'--jscomp_warning functionParams',
'--jscomp_warning globalThis',
'--jscomp_warning internetExplorerChecks',
'--jscomp_warning invalidCasts',
'--jscomp_warning misplacedTypeAnnotation',
'--jscomp_warning missingGetCssName',
'--jscomp_off missingOverride',
'--jscomp_warning missingPolyfill',
'--jscomp_warning missingProperties',
'--jscomp_warning missingProvide',
'--jscomp_warning missingRequire',
'--jscomp_warning missingReturn',
'--jscomp_warning msgDescriptions',
'--jscomp_off newCheckTypes',
'--jscomp_off newCheckTypesExtraChecks',
'--jscomp_off nonStandardJsDocs',
'--jscomp_off reportUnknownTypes',
'--jscomp_warning suspiciousCode',
'--jscomp_warning strictModuleDepCheck',
'--jscomp_warning typeInvalidation',
'--jscomp_warning undefinedNames',
'--jscomp_warning undefinedVars',
'--jscomp_warning unknownDefines',
'--jscomp_off unusedLocalVariables',
'--jscomp_off unusedPrivateMembers',
'--jscomp_warning uselessCode',
'--jscomp_off useOfGoogBase',
'--jscomp_warning underscore',
'--jscomp_warning visibility',
'--jscomp_warning lintChecks',
]
return compilerArgs
def __getDefaultCompilerArgs(outputFile):
result = [
'java -jar',
COMPILER_PATH,
'--js="%s"' % os.path.join(SRC_PATH, '**.js'),
'--js="%s"' % os.path.join(CLOSURE_SOURCE_PATH, '**.js'),
'--define "goog.DEBUG=false"',
'--js_output_file ' + outputFile,
'--dependency_mode=STRICT',
'--entry_point acgraphentry',
'--hide_warnings_for="goog"'
]
return result
@sync_required
def __compileBinary():
__ensure_dir_exists(OUT_PATH)
t = time.time()
outputFileName = os.path.join(OUT_PATH, 'graphics.min.js')
print 'Building optimized Graphics library js to ' + outputFileName
commands = __getDefaultCompilerArgs(outputFileName) + \
__getOptimizedCompilerArgs()
success = (__call_compiler(commands) == 0)
res = 'Success' if success else 'Failed'
print res + ". Time spent: {:.3f}s\n".format(time.time() - t)
return success
@sync_required
def __compilePlain():
__ensure_dir_exists(OUT_PATH)
t = time.time()
outputFileName = os.path.join(OUT_PATH, 'graphics.js')
print 'Building plain Graphics library js to ' + outputFileName
commands = __getDefaultCompilerArgs(outputFileName) + \
__getNotOptimizedCompilerArgs()
success = (__call_compiler(commands) == 0)
res = 'Success' if success else 'Failed'
print res + ". Time spent: {:.3f}s\n".format(time.time() - t)
return success
def __call_compiler(commands):
commands = " ".join(commands).replace('\\', '\\\\')
commands = shlex.split(commands)
# print commands
p = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, err) = p.communicate()
retcode = p.poll()
if len(output) > 0:
print output
return retcode
# =======================================================================================================================
# Build deps
# =======================================================================================================================
@sync_required
def __buildDepsFromCommandLine():
t = time.time()
output_file = os.path.join(SRC_PATH, 'deps.js')
success = (__callDepsWriter(SRC_PATH, output_file, 'whole project') == 0)
res = 'Success' if success else 'Failed'
print res + ". Time spent: {:.3f}s\n".format(time.time() - t)
return success
def __callDepsWriter(root, output_file, bundle_name):
print 'Writing deps file to ' + output_file
return subprocess.call([
PYTHON,
DEPS_WRITER_PATH,
'--root_with_prefix=' + root + ' ' + os.path.relpath(root, CLOSURE_SOURCE_PATH),
'--output_file=' + output_file
])
# =======================================================================================================================
# Linter.
# =======================================================================================================================
@sync_required
def __lintFromCommandLine():
t = time.time()
success = (__callLinter(SRC_PATH) == 0)
res = 'Success' if success else 'Failed'
print res + ". Time spent: {:.3f}s\n".format(time.time() - t)
return success
def __callLinter(root):
print 'Linting ' + root + ' directory'
return subprocess.call([
PYTHON,
os.path.join(CLOSURE_LINTER_WRAPPER_PATH, 'gjslint.py'),
'--flagfile',
'gjslint.cfg',
'-r',
root
])
# =======================================================================================================================
# JSDoc auto fix.
# =======================================================================================================================
@sync_required
def __autofixFromCommandLine():
t = time.time()
success = (__callAutoFix(SRC_PATH) == 0)
res = 'Success' if success else 'Failed'
print res + ". Time spent: {:.3f}s\n".format(time.time() - t)
    return success
def __callAutoFix(root):
print 'Trying to fix ' + root + ' directory'
return subprocess.call([
PYTHON,
os.path.join(CLOSURE_LINTER_WRAPPER_PATH, 'fixjsstyle.py'),
'--flagfile',
'gjslint.cfg',
'-r',
root
])
# =======================================================================================================================
# Help
# =======================================================================================================================
def __printHelp():
print "Build script commands:\n" \
"\n" \
"without params Prepares the environment, than lints and builds everything.\n" \
"\n" \
"contrib Prepares buildin environment.\n" \
"\n" \
"deps Build ./src/deps.js file, needed to run the library in uncompiled mode.\n" \
"\n" \
"compile Builds the library minified js to ./out/ directory.\n" \
"\n" \
"plain Builds the library as one file pretty-printed js to ./out/ directory.\n" \
"\n" \
"lint Lints library sources.\n" \
"\n" \
"autofix Tries to fix lint errors in library sources.\n"
# =======================================================================================================================
# Main
# =======================================================================================================================
def __execMainScript():
print ''
args = sys.argv
if len(args) == 1:
success = __sync_contrib() and \
__lintFromCommandLine() and \
__buildDepsFromCommandLine() and \
__compilePlain() and \
__compileBinary()
elif args[1] == 'contrib':
success = __sync_contrib()
elif args[1] == 'compile':
success = __compileBinary()
elif args[1] == 'plain':
success = __compilePlain()
elif args[1] == 'deps':
success = __buildDepsFromCommandLine()
elif args[1] == 'lint':
success = __lintFromCommandLine()
elif args[1] == 'autofix':
success = __autofixFromCommandLine()
else:
__printHelp()
success = True
return success
if __name__ == '__main__':
try:
success = __execMainScript()
except StandardError as e:
print e
success = False
sys.exit(0 if success else 1)
|
|
import argparse
import os
import platform
import sys
from distutils.spawn import find_executable
from typing import ClassVar, Type
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(0, os.path.abspath(os.path.join(wpt_root, "tools")))
from . import browser, install, testfiles, virtualenv
from ..serve import serve
logger = None
class WptrunError(Exception):
pass
class WptrunnerHelpAction(argparse.Action):
def __init__(self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help=None):
super(WptrunnerHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
from wptrunner import wptcommandline
wptparser = wptcommandline.create_parser()
wptparser.usage = parser.usage
wptparser.print_help()
parser.exit()
def create_parser():
from wptrunner import wptcommandline
parser = argparse.ArgumentParser(add_help=False, parents=[install.channel_args])
parser.add_argument("product", action="store",
help="Browser to run tests in")
parser.add_argument("--affected", action="store", default=None,
help="Run affected tests since revish")
parser.add_argument("--yes", "-y", dest="prompt", action="store_false", default=True,
help="Don't prompt before installing components")
parser.add_argument("--install-browser", action="store_true",
help="Install the browser from the release channel specified by --channel "
"(or the nightly channel by default).")
parser.add_argument("--install-webdriver", action="store_true",
help="Install WebDriver from the release channel specified by --channel "
"(or the nightly channel by default).")
parser._add_container_actions(wptcommandline.create_parser())
return parser
def exit(msg=None):
if msg:
logger.critical(msg)
sys.exit(1)
else:
sys.exit(0)
def args_general(kwargs):
def set_if_none(name, value):
if kwargs.get(name) is None:
kwargs[name] = value
logger.info("Set %s to %s" % (name, value))
set_if_none("tests_root", wpt_root)
set_if_none("metadata_root", wpt_root)
set_if_none("manifest_update", True)
set_if_none("manifest_download", True)
if kwargs["ssl_type"] in (None, "pregenerated"):
cert_root = os.path.join(wpt_root, "tools", "certs")
if kwargs["ca_cert_path"] is None:
kwargs["ca_cert_path"] = os.path.join(cert_root, "cacert.pem")
if kwargs["host_key_path"] is None:
kwargs["host_key_path"] = os.path.join(cert_root, "web-platform.test.key")
if kwargs["host_cert_path"] is None:
kwargs["host_cert_path"] = os.path.join(cert_root, "web-platform.test.pem")
elif kwargs["ssl_type"] == "openssl":
if not find_executable(kwargs["openssl_binary"]):
            if platform.uname()[0] == "Windows":
raise WptrunError("""OpenSSL binary not found. If you need HTTPS tests, install OpenSSL from
https://slproweb.com/products/Win32OpenSSL.html
Ensure that libraries are added to /bin and add the resulting bin directory to
your PATH.
Otherwise run with --ssl-type=none""")
else:
raise WptrunError("""OpenSSL not found. If you don't need HTTPS support run with --ssl-type=none,
otherwise install OpenSSL and ensure that it's on your $PATH.""")
def check_environ(product):
if product not in ("android_weblayer", "android_webview", "chrome", "chrome_android", "firefox", "firefox_android", "servo"):
config_builder = serve.build_config(os.path.join(wpt_root, "config.json"))
# Override the ports to avoid looking for free ports
config_builder.ssl = {"type": "none"}
config_builder.ports = {"http": [8000]}
is_windows = platform.uname()[0] == "Windows"
with config_builder as config:
expected_hosts = set(config.domains_set)
if is_windows:
expected_hosts.update(config.not_domains_set)
missing_hosts = set(expected_hosts)
if is_windows:
hosts_path = r"%s\System32\drivers\etc\hosts" % os.environ.get("SystemRoot", r"C:\Windows")
else:
hosts_path = "/etc/hosts"
if os.path.abspath(os.curdir) == wpt_root:
wpt_path = "wpt"
else:
wpt_path = os.path.join(wpt_root, "wpt")
with open(hosts_path, "r") as f:
for line in f:
line = line.split("#", 1)[0].strip()
parts = line.split()
hosts = parts[1:]
for host in hosts:
missing_hosts.discard(host)
if missing_hosts:
if is_windows:
message = """Missing hosts file configuration. Run
python %s make-hosts-file | Out-File %s -Encoding ascii -Append
in PowerShell with Administrator privileges.""" % (wpt_path, hosts_path)
else:
message = """Missing hosts file configuration. Run
%s make-hosts-file | sudo tee -a %s""" % ("./wpt" if wpt_path == "wpt" else wpt_path,
hosts_path)
raise WptrunError(message)
class BrowserSetup(object):
name = None # type: ClassVar[str]
browser_cls = None # type: ClassVar[Type[browser.Browser]]
def __init__(self, venv, prompt=True):
self.browser = self.browser_cls(logger)
self.venv = venv
self.prompt = prompt
def prompt_install(self, component):
if not self.prompt:
return True
while True:
resp = input("Download and install %s [Y/n]? " % component).strip().lower()
if not resp or resp == "y":
return True
elif resp == "n":
return False
def install(self, channel=None):
if self.prompt_install(self.name):
return self.browser.install(self.venv.path, channel)
def install_requirements(self):
if not self.venv.skip_virtualenv_setup:
self.venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", self.browser.requirements))
def setup(self, kwargs):
self.setup_kwargs(kwargs)
def safe_unsetenv(env_var):
"""Safely remove an environment variable.
    Python 3 does not support os.unsetenv on Windows for Python < 3.9, so it is
    safer to remove the variable directly from os.environ.
"""
try:
del os.environ[env_var]
except KeyError:
pass
class Firefox(BrowserSetup):
name = "firefox"
browser_cls = browser.Firefox
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
if kwargs["browser_channel"] is None:
kwargs["browser_channel"] = "nightly"
logger.info("No browser channel specified. Running nightly instead.")
binary = self.browser.find_binary(self.venv.path,
kwargs["browser_channel"])
if binary is None:
raise WptrunError("""Firefox binary not found on $PATH.
Install Firefox or use --binary to set the binary path""")
kwargs["binary"] = binary
if kwargs["certutil_binary"] is None and kwargs["ssl_type"] != "none":
certutil = self.browser.find_certutil()
if certutil is None:
# Can't download this for now because it's missing the libnss3 library
logger.info("""Can't find certutil, certificates will not be checked.
Consider installing certutil via your OS package manager or directly.""")
else:
logger.info("Using certutil %s" % certutil)
kwargs["certutil_binary"] = certutil
if kwargs["webdriver_binary"] is None and "wdspec" in kwargs["test_types"]:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("geckodriver")
if install:
logger.info("Downloading geckodriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=kwargs["browser_channel"],
browser_binary=kwargs["binary"])
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
logger.info("Unable to find or install geckodriver, skipping wdspec tests")
kwargs["test_types"].remove("wdspec")
if kwargs["prefs_root"] is None:
prefs_root = self.browser.install_prefs(kwargs["binary"],
self.venv.path,
channel=kwargs["browser_channel"])
kwargs["prefs_root"] = prefs_root
if kwargs["headless"] is None and not kwargs["debug_test"]:
kwargs["headless"] = True
logger.info("Running in headless mode, pass --no-headless to disable")
# Turn off Firefox WebRTC ICE logging on WPT (turned on by mozrunner)
safe_unsetenv('R_LOG_LEVEL')
safe_unsetenv('R_LOG_DESTINATION')
safe_unsetenv('R_LOG_VERBOSE')
# Allow WebRTC tests to call getUserMedia.
kwargs["extra_prefs"].append("media.navigator.streams.fake=true")
class FirefoxAndroid(BrowserSetup):
name = "firefox_android"
browser_cls = browser.FirefoxAndroid
def setup_kwargs(self, kwargs):
from . import android
import mozdevice
# We don't support multiple channels for android yet
if kwargs["browser_channel"] is None:
kwargs["browser_channel"] = "nightly"
if kwargs["prefs_root"] is None:
prefs_root = self.browser.install_prefs(kwargs["binary"],
self.venv.path,
channel=kwargs["browser_channel"])
kwargs["prefs_root"] = prefs_root
if kwargs["package_name"] is None:
kwargs["package_name"] = "org.mozilla.geckoview.test"
app = kwargs["package_name"]
if kwargs["device_serial"] is None:
kwargs["device_serial"] = "emulator-5554"
# We're running on an emulator so ensure that's set up
if kwargs["device_serial"].startswith("emulator-"):
emulator = android.install(logger, reinstall=False, no_prompt=not self.prompt)
android.start(logger, emulator=emulator, reinstall=False)
if "ADB_PATH" not in os.environ:
adb_path = os.path.join(android.get_sdk_path(None),
"platform-tools",
"adb")
os.environ["ADB_PATH"] = adb_path
adb_path = os.environ["ADB_PATH"]
device = mozdevice.ADBDeviceFactory(adb=adb_path,
device=kwargs["device_serial"])
if self.browser.apk_path:
device.uninstall_app(app)
device.install_app(self.browser.apk_path)
elif not device.is_app_installed(app):
raise WptrunError("app %s not installed on device %s" %
(app, kwargs["device_serial"]))
class Chrome(BrowserSetup):
name = "chrome"
browser_cls = browser.Chrome
experimental_channels = ("dev", "canary", "nightly")
def setup_kwargs(self, kwargs):
browser_channel = kwargs["browser_channel"]
if kwargs["binary"] is None:
binary = self.browser.find_binary(channel=browser_channel)
if binary:
kwargs["binary"] = binary
else:
raise WptrunError("Unable to locate Chrome binary")
if kwargs["mojojs_path"]:
kwargs["enable_mojojs"] = True
logger.info("--mojojs-path is provided, enabling MojoJS")
# TODO(Hexcles): Enable this everywhere when Chrome 86 becomes stable.
elif browser_channel in self.experimental_channels:
try:
path = self.browser.install_mojojs(
dest=self.venv.path,
channel=browser_channel,
browser_binary=kwargs["binary"],
)
kwargs["mojojs_path"] = path
kwargs["enable_mojojs"] = True
logger.info("MojoJS enabled automatically (mojojs_path: %s)" % path)
except Exception as e:
logger.error("Cannot enable MojoJS: %s" % e)
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary and not self.browser.webdriver_supports_browser(
webdriver_binary, kwargs["binary"], browser_channel):
webdriver_binary = None
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=browser_channel,
browser_binary=kwargs["binary"],
)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install matching ChromeDriver binary")
if browser_channel in self.experimental_channels:
logger.info("Automatically turning on experimental features for Chrome Dev/Canary or Chromium trunk")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
# HACK(Hexcles): work around https://github.com/web-platform-tests/wpt/issues/16448
kwargs["webdriver_args"].append("--disable-build-check")
# To start the WebTransport over HTTP/3 test server.
kwargs["enable_webtransport_h3"] = True
if os.getenv("TASKCLUSTER_ROOT_URL"):
# We are on Taskcluster, where our Docker container does not have
# enough capabilities to run Chrome with sandboxing. (gh-20133)
kwargs["binary_args"].append("--no-sandbox")
class ChromeAndroid(BrowserSetup):
name = "chrome_android"
browser_cls = browser.ChromeAndroid
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
browser_channel = kwargs["browser_channel"]
if kwargs["package_name"] is None:
kwargs["package_name"] = self.browser.find_binary(
channel=browser_channel)
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=browser_channel,
browser_binary=kwargs["package_name"],
)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
if browser_channel in ("dev", "canary"):
logger.info("Automatically turning on experimental features for Chrome Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
# HACK(Hexcles): work around https://github.com/web-platform-tests/wpt/issues/16448
kwargs["webdriver_args"].append("--disable-build-check")
class ChromeiOS(BrowserSetup):
name = "chrome_ios"
browser_cls = browser.ChromeiOS
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
raise WptrunError("Unable to locate or install chromedriver binary")
class AndroidWeblayer(BrowserSetup):
name = "android_weblayer"
browser_cls = browser.AndroidWeblayer
experimental_channels = ("dev", "canary")
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
browser_channel = kwargs["browser_channel"]
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=browser_channel)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
if browser_channel in self.experimental_channels:
logger.info("Automatically turning on experimental features for WebLayer Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
class AndroidWebview(BrowserSetup):
name = "android_webview"
browser_cls = browser.AndroidWebview
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=kwargs["browser_channel"])
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
class Opera(BrowserSetup):
name = "opera"
browser_cls = browser.Opera
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("operadriver")
if install:
logger.info("Downloading operadriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=kwargs["browser_channel"])
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install operadriver binary")
class EdgeChromium(BrowserSetup):
name = "MicrosoftEdge"
browser_cls = browser.EdgeChromium
def setup_kwargs(self, kwargs):
browser_channel = kwargs["browser_channel"]
if kwargs["binary"] is None:
binary = self.browser.find_binary(channel=browser_channel)
if binary:
logger.info("Using Edge binary %s" % binary)
kwargs["binary"] = binary
else:
raise WptrunError("Unable to locate Edge binary")
if kwargs["webdriver_binary"] is None:
webdriver_binary = None
if not kwargs["install_webdriver"]:
webdriver_binary = self.browser.find_webdriver()
if (webdriver_binary and not self.browser.webdriver_supports_browser(
webdriver_binary, kwargs["binary"])):
webdriver_binary = None
if webdriver_binary is None:
install = self.prompt_install("msedgedriver")
if install:
logger.info("Downloading msedgedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
channel=browser_channel)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install msedgedriver binary")
if browser_channel in ("dev", "canary"):
logger.info("Automatically turning on experimental features for Edge Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
class Edge(BrowserSetup):
name = "edge"
browser_cls = browser.Edge
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the correct
version for your Edge/Windows release somewhere on the %PATH%:
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
""")
kwargs["webdriver_binary"] = webdriver_binary
class EdgeWebDriver(Edge):
name = "edge_webdriver"
browser_cls = browser.EdgeWebDriver
class InternetExplorer(BrowserSetup):
name = "ie"
browser_cls = browser.InternetExplorer
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the driver for Internet Explorer
somewhere on the %PATH%:
https://selenium-release.storage.googleapis.com/index.html
""")
kwargs["webdriver_binary"] = webdriver_binary
class Safari(BrowserSetup):
name = "safari"
browser_cls = browser.Safari
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver(channel=kwargs["browser_channel"])
if webdriver_binary is None:
raise WptrunError("Unable to locate safaridriver binary")
kwargs["webdriver_binary"] = webdriver_binary
class Sauce(BrowserSetup):
name = "sauce"
browser_cls = browser.Sauce
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["sauce_browser"] is None:
raise WptrunError("Missing required argument --sauce-browser")
if kwargs["sauce_version"] is None:
raise WptrunError("Missing required argument --sauce-version")
kwargs["test_types"] = ["testharness", "reftest"]
class Servo(BrowserSetup):
name = "servo"
browser_cls = browser.Servo
def install(self, channel=None):
if self.prompt_install(self.name):
return self.browser.install(self.venv.path)
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary(self.venv.path, None)
if binary is None:
raise WptrunError("Unable to find servo binary in PATH")
kwargs["binary"] = binary
class ServoWebDriver(Servo):
name = "servodriver"
browser_cls = browser.ServoWebDriver
class WebKit(BrowserSetup):
name = "webkit"
browser_cls = browser.WebKit
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
pass
class WebKitGTKMiniBrowser(BrowserSetup):
name = "webkitgtk_minibrowser"
browser_cls = browser.WebKitGTKMiniBrowser
def install(self, channel=None):
if self.prompt_install(self.name):
return self.browser.install(self.venv.path, channel, self.prompt)
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary(venv_path=self.venv.path, channel=kwargs["browser_channel"])
if binary is None:
raise WptrunError("Unable to find MiniBrowser binary")
kwargs["binary"] = binary
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver(venv_path=self.venv.path, channel=kwargs["browser_channel"])
if webdriver_binary is None:
raise WptrunError("Unable to find WebKitWebDriver in PATH")
kwargs["webdriver_binary"] = webdriver_binary
class Epiphany(BrowserSetup):
name = "epiphany"
browser_cls = browser.Epiphany
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary()
if binary is None:
raise WptrunError("Unable to find epiphany in PATH")
kwargs["binary"] = binary
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("Unable to find WebKitWebDriver in PATH")
kwargs["webdriver_binary"] = webdriver_binary
product_setup = {
"android_weblayer": AndroidWeblayer,
"android_webview": AndroidWebview,
"firefox": Firefox,
"firefox_android": FirefoxAndroid,
"chrome": Chrome,
"chrome_android": ChromeAndroid,
"chrome_ios": ChromeiOS,
"edgechromium": EdgeChromium,
"edge": Edge,
"edge_webdriver": EdgeWebDriver,
"ie": InternetExplorer,
"safari": Safari,
"servo": Servo,
"servodriver": ServoWebDriver,
"sauce": Sauce,
"opera": Opera,
"webkit": WebKit,
"webkitgtk_minibrowser": WebKitGTKMiniBrowser,
"epiphany": Epiphany,
}
def setup_logging(kwargs, default_config=None, formatter_defaults=None):
import mozlog
from wptrunner import wptrunner
global logger
# Use the grouped formatter by default where mozlog 3.9+ is installed
if default_config is None:
if hasattr(mozlog.formatters, "GroupingFormatter"):
default_formatter = "grouped"
else:
default_formatter = "mach"
default_config = {default_formatter: sys.stdout}
wptrunner.setup_logging(kwargs, default_config, formatter_defaults=formatter_defaults)
logger = wptrunner.logger
return logger
def setup_wptrunner(venv, **kwargs):
from wptrunner import wptcommandline
kwargs = kwargs.copy()
kwargs["product"] = kwargs["product"].replace("-", "_")
check_environ(kwargs["product"])
args_general(kwargs)
if kwargs["product"] not in product_setup:
raise WptrunError("Unsupported product %s" % kwargs["product"])
setup_cls = product_setup[kwargs["product"]](venv, kwargs["prompt"])
setup_cls.install_requirements()
affected_revish = kwargs.get("affected")
if affected_revish is not None:
files_changed, _ = testfiles.files_changed(
affected_revish, include_uncommitted=True, include_new=True)
# TODO: Perhaps use wptrunner.testloader.ManifestLoader here
# and remove the manifest-related code from testfiles.
# https://github.com/web-platform-tests/wpt/issues/14421
tests_changed, tests_affected = testfiles.affected_testfiles(
files_changed, manifest_path=kwargs.get("manifest_path"), manifest_update=kwargs["manifest_update"])
test_list = tests_changed | tests_affected
logger.info("Identified %s affected tests" % len(test_list))
test_list = [os.path.relpath(item, wpt_root) for item in test_list]
kwargs["test_list"] += test_list
kwargs["default_exclude"] = True
if kwargs["install_browser"] and not kwargs["channel"]:
logger.info("--install-browser is given but --channel is not set, default to nightly channel")
kwargs["channel"] = "nightly"
if kwargs["channel"]:
channel = install.get_channel(kwargs["product"], kwargs["channel"])
if channel is not None:
if channel != kwargs["channel"]:
logger.info("Interpreting channel '%s' as '%s'" % (kwargs["channel"],
channel))
kwargs["browser_channel"] = channel
else:
logger.info("Valid channels for %s not known; using argument unmodified" % kwargs["product"])
kwargs["browser_channel"] = kwargs["channel"]
if kwargs["install_browser"]:
logger.info("Installing browser")
kwargs["binary"] = setup_cls.install(channel=channel)
setup_cls.setup(kwargs)
# Remove kwargs we handle here
wptrunner_kwargs = kwargs.copy()
for kwarg in ["affected",
"install_browser",
"install_webdriver",
"channel",
"prompt"]:
del wptrunner_kwargs[kwarg]
wptcommandline.check_args(wptrunner_kwargs)
wptrunner_path = os.path.join(wpt_root, "tools", "wptrunner")
if not venv.skip_virtualenv_setup:
venv.install_requirements(os.path.join(wptrunner_path, "requirements.txt"))
# Only update browser_version if it was not given as a command line
# argument, so that it can be overridden on the command line.
if not wptrunner_kwargs["browser_version"]:
wptrunner_kwargs["browser_version"] = setup_cls.browser.version(
binary=wptrunner_kwargs.get("binary") or wptrunner_kwargs.get("package_name"),
webdriver_binary=wptrunner_kwargs.get("webdriver_binary"),
)
return wptrunner_kwargs
def run(venv, **kwargs):
setup_logging(kwargs)
wptrunner_kwargs = setup_wptrunner(venv, **kwargs)
rv = run_single(venv, **wptrunner_kwargs) > 0
return rv
def run_single(venv, **kwargs):
from wptrunner import wptrunner
return wptrunner.start(**kwargs)
def main():
try:
parser = create_parser()
args = parser.parse_args()
venv = virtualenv.Virtualenv(os.path.join(wpt_root, "_venv_%s") % platform.uname()[0])
venv.start()
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
venv.install("requests")
        return run(venv, **vars(args))
except WptrunError as e:
exit(e)
if __name__ == "__main__":
import pdb
from tools import localpaths # noqa: F401
try:
main() # type: ignore
except Exception:
pdb.post_mortem()
|
|
# Copyright 2020 Kapil Thangavelu
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Functional Tests for the Docker
The uses here is a little specialized to be invoked by tools/dev/dockerpkg.py
during image building.
"""
import json
import os
import pytest
import yaml
try:
import docker
except ImportError:
docker = None
TEST_DOCKER = docker and os.environ.get("TEST_DOCKER", "no") == "yes"
CUSTODIAN_ORG_IMAGE = os.environ.get("CUSTODIAN_ORG_IMAGE")
CUSTODIAN_IMAGE = os.environ.get("CUSTODIAN_CLI_IMAGE")
CUSTODIAN_MAILER_IMAGE = os.environ.get("CUSTODIAN_MAILER_IMAGE")
CUSTODIAN_PSTREAM_IMAGE = os.environ.get("CUSTODIAN_POLICYSTREAM_IMAGE")
@pytest.fixture
def custodian_org_dir(tmpdir):
with open(os.path.join(tmpdir, "accounts.json"), "w") as fh:
fh.write(
json.dumps(
{
"accounts": [
{
"account_id": "644160558196",
"name": "c7n-test",
"role": "arn:aws:iam::644160558196:role/Github-CI",
"region": [
"us-east-1",
"us-east-2",
"us-west-2",
"eu-west-1",
],
}
]
}
)
)
with open(os.path.join(tmpdir, "policies-aws.json"), "w") as fh:
fh.write(
json.dumps(
{
"policies": [
{"name": "dynamo", "resource": "aws.dynamodb-table"},
{"name": "lambda", "resource": "aws.ecr"},
]
}
)
)
os.chmod(tmpdir, 0o777)
return tmpdir
@pytest.fixture
def custodian_env_creds():
env = get_env_creds()
docker_env_list = []
for k, v in env.items():
docker_env_list.append("%s=%s" % (k, v))
return docker_env_list
def get_env_creds(check_aws=False, check_azure=False, check_gcp=False):
aws_keys = (
check_aws,
["AWS_DEFAULT_REGION", "AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID"],
)
azure_keys = (check_azure, ["AZURE_SUBSCRIPTION_ID", "AZURE_ACCESS_TOKEN"])
gcp_keys = (check_gcp, ["GOOGLE_CLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"])
env_set = (aws_keys, azure_keys, gcp_keys)
env = {}
for (check, key_set) in env_set:
key_env = {}
for k in key_set:
if k in os.environ:
key_env[k] = os.environ[k]
if check:
return set(key_env) == set(key_set)
env.update(key_env)
return env
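# Hedged usage sketch: with no check_* flag, get_env_creds() returns a dict of
# whichever provider credentials are present in os.environ (used above to build
# the docker --env list); with e.g. check_aws=True it instead returns a bool
# saying whether every AWS key is set, which gates the skipif markers below.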
@pytest.mark.skipif(
not (TEST_DOCKER and CUSTODIAN_ORG_IMAGE and get_env_creds(check_aws=True)),
reason="docker testing not requested",
)
def test_org_run_aws(custodian_org_dir, custodian_env_creds):
client = docker.from_env()
client.containers.run(
CUSTODIAN_ORG_IMAGE,
(
"run -v -a c7n -c {dir}/accounts.json"
" -s {dir}/output"
" --region=all"
" -u {dir}/policies-aws.json"
).format(dir="/home/custodian/"),
environment=custodian_env_creds,
remove=True,
stderr=True,
volumes={custodian_org_dir: {"bind": "/home/custodian", "mode": "rw"}},
)
@pytest.mark.skipif(not TEST_DOCKER, reason="docker testing not requested")
@pytest.mark.parametrize(
"image_name",
list(
filter(
None,
[
CUSTODIAN_IMAGE,
CUSTODIAN_ORG_IMAGE,
CUSTODIAN_MAILER_IMAGE,
CUSTODIAN_PSTREAM_IMAGE,
],
)
),
)
def test_image_metadata(image_name):
client = docker.from_env()
image = client.images.get(image_name)
assert set(image.labels) == {
"name",
"repository",
"org.opencontainers.image.created",
"org.opencontainers.image.description",
"org.opencontainers.image.documentation",
"org.opencontainers.image.licenses",
"org.opencontainers.image.title",
"org.opencontainers.image.source",
"org.opencontainers.image.revision",
}
@pytest.mark.skipif(
not (TEST_DOCKER and CUSTODIAN_IMAGE), reason="docker testing not requested"
)
def test_cli_providers_available():
providers = os.environ.get("CUSTODIAN_PROVIDERS", None)
if providers is None:
providers = {"aws", "azure", "gcp", "k8s"}
elif providers == "":
providers = {"aws"}
else:
providers = set(providers.split())
client = docker.from_env()
output = client.containers.run(CUSTODIAN_IMAGE, "schema", stderr=True)
resources = yaml.safe_load(output.strip())["resources"]
found_providers = {r.split(".", 1)[0] for r in resources}
assert providers == found_providers
@pytest.mark.skipif(
not (TEST_DOCKER and CUSTODIAN_IMAGE), reason="docker testing not requested"
)
def test_cli_version_debug():
client = docker.from_env()
output = client.containers.run(CUSTODIAN_IMAGE, "version --debug", stderr=True).decode('utf8')
assert "Docker: True" in output
assert "boto3==" in output
@pytest.mark.skipif(
not (TEST_DOCKER and CUSTODIAN_IMAGE and get_env_creds(check_aws=True)),
reason="docker testing not requested",
)
def test_cli_run_aws(custodian_org_dir, custodian_env_creds):
client = docker.from_env()
output = client.containers.run(
CUSTODIAN_IMAGE,
("run -v" " -s {dir}/output" " {dir}/policies-aws.json").format(
dir="/home/custodian"
),
environment=custodian_env_creds,
remove=True,
stderr=True,
volumes={custodian_org_dir: {"bind": "/home/custodian", "mode": "rw"}},
)
print()
print(output.decode("utf8"))
@pytest.mark.skipif(
not (TEST_DOCKER and CUSTODIAN_IMAGE and get_env_creds(check_aws=True)),
reason="docker testing not requested",
)
def test_cli_run_aws_sans_home_dir(custodian_org_dir, custodian_env_creds):
# Specifically targeting #5581 and distroless containers: verify that we
# don't get errors about creating cache directories.
client = docker.from_env()
output = client.containers.run(
CUSTODIAN_IMAGE,
("run -v -s {dir}/output {dir}/policies-aws.json").format(dir="/run"),
environment=custodian_env_creds,
remove=True,
stderr=True,
volumes={custodian_org_dir: {"bind": "/run", "mode": "rw"}},
)
print()
print(output.decode("utf8"))
assert "Permission denied" not in output.decode("utf8")
|
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
class Route53HostedZoneTest(BaseTest):
def test_hostedzone_shield(self):
session_factory = self.replay_flight_data("test_zone_shield_enable")
p = self.load_policy(
{
"name": "zone-activate",
"resource": "hostedzone",
"filters": [
{"Config.PrivateZone": False},
{"Name": "invitro.cloud."},
{"type": "shield-enabled", "state": False},
],
"actions": ["set-shield"],
},
session_factory=session_factory,
)
self.assertEqual(len(p.run()), 1)
p = self.load_policy(
{
"name": "zone-verify",
"resource": "hostedzone",
"filters": [{"type": "shield-enabled", "state": True}],
},
session_factory=session_factory,
)
self.assertEqual(p.run()[0]["Id"], "/hostedzone/XXXXURLYV5DGGG")
def test_route53_hostedzone_tag(self):
session_factory = self.replay_flight_data("test_route53_hostedzone_tag")
p = self.load_policy(
{
"name": "hostedzone-tag-records",
"resource": "hostedzone",
"filters": [
{
"type": "value",
"key": "ResourceRecordSetCount",
"value": 2,
"op": "gte",
}
],
"actions": [{"type": "tag", "key": "abc", "value": "xyz"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
_id = resources[0]["Id"].split("/")[-1]
tags = client.list_tags_for_resource(ResourceType="hostedzone", ResourceId=_id)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 1)
self.assertTrue("abc" in tags["ResourceTagSet"]["Tags"][0].values())
def test_route53_hostedzone_untag(self):
session_factory = self.replay_flight_data("test_route53_hostedzone_untag")
p = self.load_policy(
{
"name": "hostedzone-untag-records",
"resource": "hostedzone",
"filters": [{"tag:abc": "present"}],
"actions": [{"type": "remove-tag", "tags": ["abc"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
_id = resources[0]["Id"].split("/")[-1]
tags = client.list_tags_for_resource(ResourceType="hostedzone", ResourceId=_id)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 0)
def test_route53_hostedzone_markop(self):
session_factory = self.replay_flight_data("test_route53_hostedzone_markop")
p = self.load_policy(
{
"name": "hostedzone-markop-records",
"resource": "hostedzone",
"filters": [{"tag:abc": "present"}],
"actions": [{"type": "mark-for-op", "op": "notify", "days": 4}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
_id = resources[0]["Id"].split("/")[-1]
tags = client.list_tags_for_resource(ResourceType="hostedzone", ResourceId=_id)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 2)
self.assertTrue("abc" in tags["ResourceTagSet"]["Tags"][0].values())
class Route53HealthCheckTest(BaseTest):
def test_route53_healthcheck_tag(self):
session_factory = self.replay_flight_data("test_route53_healthcheck_tag")
p = self.load_policy(
{
"name": "healthcheck-tag-records",
"resource": "healthcheck",
"filters": [
{
"type": "value",
"key": "HealthCheckConfig.FailureThreshold",
"value": 3,
"op": "gte",
}
],
"actions": [{"type": "tag", "key": "abc", "value": "xyz"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
tags = client.list_tags_for_resource(
ResourceType="healthcheck", ResourceId=resources[0]["Id"]
)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 2)
self.assertTrue("abc" in tags["ResourceTagSet"]["Tags"][0].values())
def test_route53_healthcheck_untag(self):
session_factory = self.replay_flight_data("test_route53_healthcheck_untag")
p = self.load_policy(
{
"name": "healthcheck-untag-records",
"resource": "healthcheck",
"filters": [{"tag:abc": "present"}],
"actions": [{"type": "remove-tag", "tags": ["abc"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
tags = client.list_tags_for_resource(
ResourceType="healthcheck", ResourceId=resources[0]["Id"]
)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 1) # Name is a tag
self.assertTrue("Name" in tags["ResourceTagSet"]["Tags"][0].values())
def test_route53_healthcheck_markop(self):
session_factory = self.replay_flight_data("test_route53_healthcheck_markop")
p = self.load_policy(
{
"name": "healthcheck-markop-records",
"resource": "healthcheck",
"filters": [{"tag:abc": "present"}],
"actions": [{"type": "mark-for-op", "op": "notify", "days": 4}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("route53")
_id = resources[0]["Id"].split("/")[-1]
tags = client.list_tags_for_resource(ResourceType="healthcheck", ResourceId=_id)
self.assertEqual(len(tags["ResourceTagSet"]["Tags"]), 3)
self.assertTrue("maid_status" in tags["ResourceTagSet"]["Tags"][1].values())
class Route53DomainTest(BaseTest):
def test_route53_domain_auto_renew(self):
session_factory = self.replay_flight_data("test_route53_domain")
p = self.load_policy(
{
"name": "r53domain-auto-renew",
"resource": "r53domain",
"filters": [{"type": "value", "key": "AutoRenew", "value": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_route53_domain_transfer_lock(self):
session_factory = self.replay_flight_data("test_route53_domain")
p = self.load_policy(
{
"name": "r53domain-transfer-lock",
"resource": "r53domain",
"filters": [{"type": "value", "key": "TransferLock", "value": False}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_route53_domain_add_tag(self):
session_factory = self.replay_flight_data("test_route53_domain_add_tag")
p = self.load_policy(
{
"name": "r53domain-add-tag",
"resource": "r53domain",
"filters": [{"tag:TestTag": "absent"}],
"actions": [{"type": "tag", "key": "TestTag", "value": "TestValue"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("route53domains")
tags = client.list_tags_for_domain(DomainName=resources[0]["DomainName"])[
"TagList"
]
self.assertEqual([tags[0]["Key"], tags[0]["Value"]], ["TestTag", "TestValue"])
def test_route53_domain_remove_tag(self):
session_factory = self.replay_flight_data("test_route53_domain_remove_tag")
p = self.load_policy(
{
"name": "r53domain-add-tag",
"resource": "r53domain",
"filters": [{"tag:TestTag": "present"}],
"actions": [{"type": "remove-tag", "tags": ["TestTag"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("route53domains")
tags = client.list_tags_for_domain(DomainName=resources[0]["DomainName"])[
"TagList"
]
self.assertEqual(len(tags), 0)
|
|
# pylint: disable=g-bad-file-header
# pylint: disable=cell-var-from-loop
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with the Microsoft Visual C++ toolchain."""
import ntpath
import os
import re
import subprocess
import sys
MAX_PATH = 260 # The maximum number of characters in a Windows path.
MAX_OPTION_LENGTH = 10 # The maximum length of a compiler/linker option.
MAX_DRIVE_LENGTH = 3 # The maximum length of a drive.
ASSEMBLY_AS_C_SOURCE = '/Tc'
LIB_SUFFIX = '.lib'
VC_PATH = 'C:\\Program Files (x86)\\Microsoft Visual Studio 14.0'
VC_VERSION = '140'
PLATFORM_SDK_PATH = 'C:\\Program Files (x86)\\Windows Kits'
PLATFORM_SDK_VERSION = '10.0.10240.0'
TMP_PATH = 'C:\\Windows\\Temp'
class Error(Exception):
"""Base class for all script-specific errors."""
pass
class ArgParser(object):
"""Class that parses gcc/clang-style options for a Windows.
The particular substitutions that are needed are passed to the object.
"""
def __init__(self, driver, argv, substitutions):
self.driver = driver
self.substitutions = substitutions
self.options = []
self.target_arch = None
self.compilation_mode = None
self.deps_file = None
self.output_file = None
self._ParseArgs(argv)
def _MatchOneArg(self, args):
"""Finds a pattern which matches the beginning elements of args.
Args:
args: A list of arguments to replace.
Returns:
A tuple of (number of arguments parsed, action, match groups).
"""
for (regex, action) in self.substitutions:
if isinstance(regex, str):
regex = [regex]
j = 0
matches = []
for r in regex:
if j < len(args):
match = re.compile('^' + r + '$').match(args[j])
else:
match = None
matches.append(match)
j += 1
if None in matches:
continue
groups = []
for m in matches:
groups.extend(m.groups())
return (len(regex), action, groups)
return (0, '', [])
def _ParseArgs(self, argv):
"""Parses argv and replaces its elements using special tokens.
The following is a list of supported tokens. The format is $TOKEN%d, where
%d is the 0-based group number from regex matches of the pattern.
$CREATE_PATH%d: Touches a file at the path in the matching token.
$LOAD_PARAMS%d: Loads an ld-style params file and appends all arguments to
the current argument list by recursively calling
_ParseArgs.
$%d : Numeric token that just replaces the match group with
the value specified in the match list.
$PATH%d : Replaces the match with a Windows-normalized version of
the match; assumes that the match is a path.
$PATH%d_NO_EXT: Same as $PATH but strips out any file extension.
$TARGET_ARCH : Set self.target_arch to 'x86' or 'x64' for '-m32' and
'-m64', respectively.
$COMPILE_OUTPUT%d: Sets the output name of a compilation step.
$COMPILATION_MODE: Sets self.compilation_mode from the value of a
'-Xcompilation-mode=' flag.
$CREATE_PRECOMPILED_HEADER: Informs the system that we are generating a
precompiled header rather than an object file.
$GENERATE_DEPS%d: Generates a gcc-style .d file containing dependencies.
Args:
argv: A list of arguments to replace.
Returns:
A list of replaced arguments to pass to the target command.
Raises:
Error: if invalid arguments are found.
"""
i = 0
matched = []
unmatched = []
files = []
is_pch = False
while i < len(argv):
num_matched, action, groups = self._MatchOneArg(argv[i:])
arg = argv[i]
if num_matched == 0:
# Strip out any .a's that have 0 size, they are header or intermediate
# dependency libraries and don't contain any code. 0-length files are
# considered corrupt by the linker (error LNK1136).
if (os.path.isfile(arg) and os.path.splitext(arg)[1] == '.a' and
os.path.getsize(arg) == 0):
i += 1
continue
# If the argument is an absolute path, then add it directly.
if arg[0] == '/':
self.AddOpt(arg)
elif os.path.isfile(arg):
path = self.NormPath(arg)
ext = os.path.splitext(arg)[1].lower()
if ext in ['.s']:
# Treat assembly files as C source files using a special option.
path = ASSEMBLY_AS_C_SOURCE + path
# If this is an actual file on disk then just pass it to the tool.
files.append(path)
elif not arg.endswith(LIB_SUFFIX):
# Ignore .lib files.
unmatched.append(arg)
i += 1
continue
matched += argv[i:i + num_matched]
# Handle special options.
for entry in action:
if entry == '$CREATE_PRECOMPILED_HEADER':
# The PCH flag comes _first_ on blaze-generated command-lines, so all
# we can do is set a flag here since we have not yet parsed any other
# options.
is_pch = True
continue
if entry == '$TARGET_ARCH':
if arg == '-m32':
self.target_arch = 'x86'
elif arg == '-m64':
self.target_arch = 'x64'
else:
raise Error('Unknown target arch flag: %r' % arg)
continue
if entry == '$COMPILATION_MODE':
empty, prefix, mode = arg.partition('-Xcompilation-mode=')
if empty or not prefix or mode not in ['dbg', 'fastbuild', 'opt']:
raise Error('Invalid compilation mode flag: %r' % arg)
self.compilation_mode = mode
continue
if not groups:
self.options.append(entry)
else:
# Substitute special tokens.
for g in xrange(0, len(groups)):
value = groups[g]
# Check for special tokens.
if entry == ('$CREATE_PATH%d' % g):
with open(value, 'a'):
os.utime(value, None)
continue
if entry == ('$LOAD_PARAMS%d' % g):
try:
# The arguments in the params file need to be processed as
# regular command-line arguments.
params = [line.rstrip() for line in open(value, 'r')]
self._ParseArgs(params)
except IOError, e:
print 'Could not open', value, 'for reading:', str(e)
exit(-1)
continue
# Depending on whether we are creating precompiled headers cl.exe
# needs different options for specifying the output file.
if entry == ('$COMPILE_OUTPUT%d' % g):
if is_pch:
# Just touch the PCH file so that blaze is happy.
with open(value, 'a'):
os.utime(value, None)
# Exit since we don't want to actually try to process a PCH.
sys.exit(0)
else:
self.output_file = value
self.options.append('/Fo%s' % self.NormPath(value))
self.options.append('/Fd%s.pdb' %
self.NormPath(os.path.splitext(value)[0]))
continue
if entry == ('$GENERATE_DEPS%d' % g):
self.options.append('/showIncludes')
self.deps_file = value
continue
# Regular substitution.
patterns = {
'$%d' % g: value,
'$PATH%d_NO_EXT' % g: self.NormPath(os.path.splitext(value)[0]),
'$PATH%d' % g: self.NormPath(value),
}
pattern = re.compile('(%s)' %
'|'.join(map(re.escape, patterns.keys())))
result = pattern.sub(lambda x: patterns[x.group(0)], entry)
self.options.append(result)
i += num_matched
if unmatched:
print 'Warning: Unmatched arguments: ' + ' '.join(unmatched)
# Use the proper runtime flag depending on compilation mode. If the
# compilation is happening in debug mode, this flag already exists. If not,
# then we must add it.
if '/MT' not in self.options and '/MTd' not in self.options:
self.AddOpt('/MT')
# Add in any parsed files
self.options += files
def NormPath(self, path):
"""Uses the current WindowsRunner to normalize the passed path.
Args:
path: the path to normalize.
Returns:
A normalized string representing a path suitable for Windows.
"""
return self.driver.NormPath(path)
def AddOpt(self, option):
"""Adds a single option.
Args:
option: the option to add.
"""
self.options.append(option)
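# Illustrative sketch (assumed usage, not from the original file): the
# `substitutions` passed to ArgParser is a list of (regex, action) pairs using
# the tokens documented in _ParseArgs. A regex may also be a tuple of regexes
# that must match consecutive arguments. Hypothetical examples:
#
#   substitutions = [
#       ('-m(32|64)', ['$TARGET_ARCH']),          # record target architecture
#       ('-I(.+)', ['/I$PATH0']),                 # translate include paths
#       (('-o', '(.+)'), ['$COMPILE_OUTPUT0']),   # set the compile output file
#   ]
#   parser = ArgParser(driver, argv, substitutions)
#   # parser.options now holds the translated cl.exe options.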
class WindowsRunner(object):
"""Base class that encapsulates the details of running a binary."""
def NormPath(self, path):
"""Normalizes an input unix style path to a < 260 char Windows format.
Windows paths cannot be greater than 260 characters.
Args:
path: A path in unix format.
Returns:
An absolute path in Windows format, rooted from some
directory.
Raises:
Error: if path is too long
"""
abspath = os.path.abspath(path)
long_path = abspath.replace('\\', '\\\\')
# We must allow for the drive letter as well, which is three characters, and
# the length of any compiler option ahead of the path.
if len(long_path) + MAX_DRIVE_LENGTH + MAX_OPTION_LENGTH < MAX_PATH:
return long_path
else:
# TODO(pcloudy):
# Find a new way to deal with long path in real windows
print 'Error: path is too long:' + long_path
raise Error('path is too long: ' + long_path)
return None
def SetupEnvironment(self, build_arch):
"""Setup proper path for running.
Args:
build_arch: Either 'x64' or 'x86', which binary architecture to build for.
Returns:
An environment suitable for running on Windows.
"""
common_paths = [
'C:\\Windows',
'C:\\Windows\\system32',
'C:\\Windows\\System32\\Wbem',
os.path.join(VC_PATH, 'Common7\\IDE'),
os.path.join(VC_PATH,
'Common7\\IDE\\CommonExtensions\\Microsoft\\TestWindow'),
os.path.join(VC_PATH, 'Common7\\Tools'),
os.path.join(VC_PATH, 'VC\\VCPackages'),
]
x86_paths = [
os.path.join(VC_PATH, 'VC\\bin'),
os.path.join(VC_PATH, 'VC\\redist\\x86\\Microsoft.VC' + VC_VERSION +
'.CRT'),
os.path.join(PLATFORM_SDK_PATH, 'bin\\x86'),
] + common_paths
x64_paths = [
os.path.join(VC_PATH, 'VC\\bin\\x86_amd64'),
os.path.join(VC_PATH, 'VC\\bin'),
os.path.join(VC_PATH, 'VC\\redist\\x64\\Microsoft.VC' + VC_VERSION +
'.CRT'),
os.path.join(VC_PATH, 'VC\\redist\\x86\\Microsoft.VC' + VC_VERSION +
'.CRT'),
os.path.join(PLATFORM_SDK_PATH, 'bin\\x64'),
] + common_paths
path = x86_paths if build_arch == 'x86' else x64_paths
include = [
os.path.join(VC_PATH, 'VC\\INCLUDE'),
os.path.join(VC_PATH, 'VC\\ATLMFC\\INCLUDE'),
os.path.join(PLATFORM_SDK_PATH, '10\\include', PLATFORM_SDK_VERSION,
'ucrt'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\include'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\include\\um'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\include\\shared'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\include\\winrt'),
os.path.join(PLATFORM_SDK_PATH, 'NETFXSDK\\4.6.1\\include\\um'),
]
x86_lib_path = [
os.path.join(VC_PATH, 'VC\\bin'),
os.path.join(VC_PATH, 'VC\\LIB'),
os.path.join(VC_PATH, 'VC\\ATLMFC\\LIB'),
os.path.join(VC_PATH,
'VC\\redist\\x86\\Microsoft.VC' + VC_VERSION + '.CRT'),
os.path.join(PLATFORM_SDK_PATH, '10\\lib', PLATFORM_SDK_VERSION,
'ucrt\\x86'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\lib\\winv6.3\\um\\x86'),
os.path.join(PLATFORM_SDK_PATH, 'NETFXSDK\\4.6.1\\lib\\um\\x86'),
]
x64_lib_path = [
os.path.join(VC_PATH, 'VC\\bin\\x86_amd64'),
os.path.join(VC_PATH, 'VC\\LIB\\amd64'),
os.path.join(VC_PATH, 'VC\\ATLMFC\\LIB\\amd64'),
os.path.join(VC_PATH,
'VC\\redist\\x64\\Microsoft.VC' + VC_VERSION + '.CRT'),
os.path.join(PLATFORM_SDK_PATH, '10\\lib', PLATFORM_SDK_VERSION,
'ucrt\\x64'),
os.path.join(PLATFORM_SDK_PATH, '8.1\\lib\\winv6.3\\um\\x64'),
os.path.join(PLATFORM_SDK_PATH, 'NETFXSDK\\4.6.1\\lib\\um\\x64'),
]
lib = x86_lib_path if build_arch == 'x86' else x64_lib_path
build_env = os.environ.copy()
build_env['PATH'] = ';'.join([build_env['PATH']] + path)
build_env['INCLUDE'] = ';'.join(include)
build_env['LIB'] = ';'.join(lib)
build_env['TEMP'] = TMP_PATH
build_env['TMP'] = TMP_PATH
return build_env
def RunBinary(self, binary, args, build_arch, parser):
"""Runs binary on Windows with the passed args.
Args:
binary: The binary to run.
args: The arguments to pass to binary.
build_arch: Either 'x64' or 'x86', which binary architecture to build for.
parser: An ArgParser that contains parsed arguments.
Returns:
The return code from executing binary.
"""
# Filter out some not-so-useful cl windows messages.
filters = [
'.*warning LNK4006: __NULL_IMPORT_DESCRIPTOR already defined.*\n',
'.*warning LNK4044: unrecognized option \'/MT\'; ignored.*\n',
'.*warning LNK4044: unrecognized option \'/link\'; ignored.*\n',
'.*warning LNK4221: This object file does not define any '
'previously.*\n',
'\r\n',
'\n\r',
]
# Check again the arguments are within MAX_PATH.
for arg in args:
if len(arg) > MAX_PATH:
print('Warning: arg "' + arg + '" is longer than 260 characters (' +
str(len(arg)) + '); programs may crash with long arguments')
if os.path.splitext(arg)[1].lower() in ['.c', '.cc', '.cpp', '.s']:
# cl.exe prints out the file name it is compiling; add that to the
# filter.
name = arg.rpartition(ntpath.sep)[2]
filters.append(name)
if '/w' in args:
args = [arg for arg in args if arg not in ['/W2', '/W3', '/W4']]
# Setup the Windows paths and the build environment.
# TODO(pcloudy): make these paths configurable
build_env = self.SetupEnvironment(build_arch)
# Construct a large regular expression for all filters.
output_filter = re.compile('(' + ')|('.join(filters) + ')')
includes_filter = re.compile(r'Note: including file:\s+(.*)')
# Run the command.
cmd = [binary] + args
# Save stderr output to a temporary in case we need it.
proc = subprocess.Popen(cmd,
env=build_env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
deps = []
for line in proc.stdout:
if not output_filter.match(line):
includes = includes_filter.match(line)
if includes:
filename = includes.group(1).rstrip()
deps += [filename]
else:
print line.rstrip()
proc.wait()
# Generate deps file if requested.
if parser.deps_file:
with open(parser.deps_file, 'w') as deps_file:
# Start with the name of the output file.
deps_file.write(parser.output_file + ': \\\n')
for i, dep in enumerate(deps):
dep = dep.replace('\\', '/').replace(' ', '\\ ')
deps_file.write(' ' + dep)
if i < len(deps) - 1:
deps_file.write(' \\')
deps_file.write('\n')
return proc.returncode
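# Illustrative sketch (hypothetical, not part of the original file): once the
# arguments have been translated by an ArgParser, a driver would typically run
# the tool through a WindowsRunner, e.g.
#
#   runner = WindowsRunner()
#   rc = runner.RunBinary('cl.exe', parser.options,
#                         parser.target_arch or 'x64', parser)
#
# RunBinary sets up PATH/INCLUDE/LIB via SetupEnvironment(), filters the noisy
# cl.exe/link.exe output, and, when parser.deps_file is set, writes a
# gcc-style .d dependency file from the '/showIncludes' output.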
|
|
import os
import ctypes
import ctypes.util
import syslog
import tempfile
from functools import wraps
if os.name == 'posix':
path = ctypes.util.find_library('rsync')
if path is None:
raise ImportError('Could not find librsync, make sure it is installed')
try:
_librsync = ctypes.cdll.LoadLibrary(path)
except OSError:
raise ImportError('Could not load librsync at "%s"' % path)
elif os.name == 'nt':
try:
_librsync = ctypes.cdll.librsync
except:
raise ImportError('Could not load librsync, make sure it is installed')
else:
raise NotImplementedError('Librsync is not supported on your platform')
MAX_SPOOL = 1024 ** 2 * 5
TRACE_LEVELS = (
syslog.LOG_EMERG, syslog.LOG_ALERT, syslog.LOG_CRIT, syslog.LOG_ERR,
syslog.LOG_WARNING, syslog.LOG_NOTICE, syslog.LOG_INFO, syslog.LOG_DEBUG,
)
RS_DONE = 0
RS_BLOCKED = 1
RS_JOB_BLOCKSIZE = 65536
RS_DEFAULT_STRONG_LEN = 8
RS_DEFAULT_BLOCK_LEN = 2048
#############################
# DEFINES FROM librsync.h #
#############################
# librsync.h: rs_buffers_s
class Buffer(ctypes.Structure):
_fields_ = [
('next_in', ctypes.c_char_p),
('avail_in', ctypes.c_size_t),
('eof_in', ctypes.c_int),
('next_out', ctypes.c_char_p),
('avail_out', ctypes.c_size_t),
]
# char const *rs_strerror(rs_result r);
_librsync.rs_strerror.restype = ctypes.c_char_p
_librsync.rs_strerror.argtypes = (ctypes.c_int, )
# rs_job_t *rs_sig_begin(size_t new_block_len, size_t strong_sum_len);
_librsync.rs_sig_begin.restype = ctypes.c_void_p
_librsync.rs_sig_begin.argtypes = (ctypes.c_size_t, ctypes.c_size_t, )
# rs_job_t *rs_loadsig_begin(rs_signature_t **);
_librsync.rs_loadsig_begin.restype = ctypes.c_void_p
_librsync.rs_loadsig_begin.argtypes = (ctypes.c_void_p, )
# rs_job_t *rs_delta_begin(rs_signature_t *);
_librsync.rs_delta_begin.restype = ctypes.c_void_p
_librsync.rs_delta_begin.argtypes = (ctypes.c_void_p, )
# rs_job_t *rs_patch_begin(rs_copy_cb *, void *copy_arg);
_librsync.rs_patch_begin.restype = ctypes.c_void_p
_librsync.rs_patch_begin.argtypes = (ctypes.c_void_p, ctypes.c_void_p, )
# rs_result rs_build_hash_table(rs_signature_t* sums);
_librsync.rs_build_hash_table.restype = ctypes.c_size_t
_librsync.rs_build_hash_table.argtypes = (ctypes.c_void_p, )
# rs_result rs_job_iter(rs_job_t *, rs_buffers_t *);
_librsync.rs_job_iter.restype = ctypes.c_int
_librsync.rs_job_iter.argtypes = (ctypes.c_void_p, ctypes.c_void_p, )
# void rs_trace_set_level(rs_loglevel level);
_librsync.rs_trace_set_level.restype = None
_librsync.rs_trace_set_level.argtypes = (ctypes.c_int, )
# void rs_free_sumset(rs_signature_t *);
_librsync.rs_free_sumset.restype = None
_librsync.rs_free_sumset.argtypes = (ctypes.c_void_p, )
# rs_result rs_job_free(rs_job_t *);
_librsync.rs_job_free.restype = ctypes.c_int
_librsync.rs_job_free.argtypes = (ctypes.c_void_p, )
# A function declaration for our read callback.
patch_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_longlong,
ctypes.c_size_t, ctypes.POINTER(Buffer))
class LibrsyncError(Exception):
def __init__(self, r):
super(LibrsyncError, self).__init__(_librsync.rs_strerror(
ctypes.c_int(r)))
def seekable(f):
@wraps(f)
def wrapper(*args, **kwargs):
s = args[0]
assert callable(getattr(s, 'seek', None)), 'Must provide seekable ' \
'file-like object'
return f(*args, **kwargs)
return wrapper
def _execute(job, f, o=None):
"""
Executes a librsync "job" by reading bytes from `f` and writing results to
`o` if provided. If `o` is omitted, the output is ignored.
"""
# Re-use the same buffer for output, we will read from it after each
# iteration.
out = ctypes.create_string_buffer(RS_JOB_BLOCKSIZE)
while True:
block = f.read(RS_JOB_BLOCKSIZE)
buff = Buffer()
# provide the data block via input buffer.
buff.next_in = ctypes.c_char_p(block)
buff.avail_in = ctypes.c_size_t(len(block))
buff.eof_in = ctypes.c_int(not block)
# Set up our buffer for output.
buff.next_out = ctypes.cast(out, ctypes.c_char_p)
buff.avail_out = ctypes.c_size_t(RS_JOB_BLOCKSIZE)
r = _librsync.rs_job_iter(job, ctypes.byref(buff))
if o:
o.write(out.raw[:RS_JOB_BLOCKSIZE - buff.avail_out])
if r == RS_DONE:
break
elif r != RS_BLOCKED:
raise LibrsyncError(r)
if buff.avail_in > 0:
# There is data left in the input buffer, librsync did not consume
# all of it. Rewind the file a bit so we include that data in our
# next read. It would be better to simply tack data to the end of
# this buffer, but that is very difficult in Python.
f.seek(f.tell() - buff.avail_in)
if o and callable(getattr(o, 'seek', None)):
# As a matter of convenience, rewind the output file.
o.seek(0)
return o
def debug(level=syslog.LOG_DEBUG):
assert level in TRACE_LEVELS, "Invalid log level %i" % level
_librsync.rs_trace_set_level(level)
@seekable
def signature(f, s=None, block_size=RS_DEFAULT_BLOCK_LEN):
"""
Generate a signature for the file `f`. The signature will be written to `s`.
If `s` is omitted, a temporary file will be used. This function returns the
signature file `s`. You can specify the size of the blocks using the
optional `block_size` parameter.
"""
if s is None:
s = tempfile.SpooledTemporaryFile(max_size=MAX_SPOOL, mode='wb+')
job = _librsync.rs_sig_begin(block_size, RS_DEFAULT_STRONG_LEN)
try:
_execute(job, f, s)
finally:
_librsync.rs_job_free(job)
return s
@seekable
def delta(f, s, d=None):
"""
Create a delta for the file `f` using the signature read from `s`. The delta
will be written to `d`. If `d` is omitted, a temporary file will be used.
This function returns the delta file `d`. All parameters must be file-like
objects.
"""
if d is None:
d = tempfile.SpooledTemporaryFile(max_size=MAX_SPOOL, mode='wb+')
sig = ctypes.c_void_p()
try:
job = _librsync.rs_loadsig_begin(ctypes.byref(sig))
try:
_execute(job, s)
finally:
_librsync.rs_job_free(job)
r = _librsync.rs_build_hash_table(sig)
if r != RS_DONE:
raise LibrsyncError(r)
job = _librsync.rs_delta_begin(sig)
try:
_execute(job, f, d)
finally:
_librsync.rs_job_free(job)
finally:
_librsync.rs_free_sumset(sig)
return d
@seekable
def patch(f, d, o=None):
"""
Patch the file `f` using the delta `d`. The patched file will be written to
`o`. If `o` is omitted, a temporary file will be used. This function returns
the patched file `o`. All parameters should be file-like objects. `f` is
required to be seekable.
"""
if o is None:
o = tempfile.SpooledTemporaryFile(max_size=MAX_SPOOL, mode='wb+')
@patch_callback
def read_cb(opaque, pos, length, buff):
f.seek(pos)
size_p = ctypes.cast(length, ctypes.POINTER(ctypes.c_size_t)).contents
size = size_p.value
block = f.read(size)
size_p.value = len(block)
buff_p = ctypes.cast(buff, ctypes.POINTER(ctypes.c_char_p)).contents
buff_p.value = block
return RS_DONE
job = _librsync.rs_patch_begin(read_cb, None)
try:
_execute(job, d, o)
finally:
_librsync.rs_job_free(job)
return o
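# Illustrative usage sketch (not part of the original module): the classic
# signature -> delta -> patch round-trip, run entirely in memory. Assumes a
# librsync version compatible with the bindings above is installed.
if __name__ == '__main__':
    import io

    base = io.BytesIO(b'hello world, original contents\n' * 64)
    updated = io.BytesIO(b'hello world, UPDATED contents\n' * 64)

    sig = signature(base)        # signature of the base file (rewound on return)
    dlt = delta(updated, sig)    # delta that rewrites base into updated
    base.seek(0)                 # signature() consumed base; rewind before patching
    result = patch(base, dlt)    # apply the delta against the base file

    assert result.read() == updated.getvalue()
    print('librsync round-trip OK')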
|
|
# -*- coding: utf-8 -*-
# Module for pretty-printing tabular data.
# Imported from https://bitbucket.org/astanin/python-tabulate
# Found at https://pypi.python.org/pypi/tabulate
# Pulled in verbatim to xypath to avoid extraneous dependencies.
# Copyright (c) 2011-2013 Sergey Astanin
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
from platform import python_version_tuple
import re
from six.moves import map
import six
from six.moves import range
from six.moves import zip
if python_version_tuple()[0] < "3":
from itertools import izip_longest
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = six.text_type
_binary_type = str
else:
from itertools import zip_longest as izip_longest
from functools import reduce
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = str
_binary_type = bytes
__all__ = ["tabulate"]
__version__ = "0.6"
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "usecolons", "usehtmlattrs",
"with_header_hide",
"without_header_hide"])
_format_defaults = {"padding": 0,
"usecolons": False,
"usehtmlattrs": False,
"with_header_hide": [],
"without_header_hide": []}
_table_formats = {"simple":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
usecolons=False,
usehtmlattrs=False,
with_header_hide=["linebelow"],
without_header_hide=[]),
"plain":
TableFormat(None, None, None, None,
DataRow("", " ", ""), DataRow("", " ", ""),
**_format_defaults),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"pipe":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=True,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=[]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=DataRow("!", "!!", ""),
datarow=DataRow("|", "||", ""),
padding=1,
usecolons=False,
usehtmlattrs=True,
with_header_hide=[],
without_header_hide=["linebelowheader"])}
_invisible_codes = re.compile(r"\x1b\[\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == u'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=None, datarow=DataRow('', '\t', ''), **_format_defaults)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is int or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type(u'\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, u'\u044f\u0439\u0446\u0430') == u'\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
return re.sub(_invisible_codes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len(_strip_invisible(s))
else:
return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(list(map(width_fn, strings))), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _text_type: 4 }
invtypes = { 4: _text_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", u'\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=u""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = [u'\u0431\u0443\u043a\u0432\u0430', u'\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [[u'\u0430\u0437', 2], [u'\u0431\u0443\u043a\u0438', 4]] ; \
good_result = u'\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _binary_type, _text_type]:
return u"{0}".format(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return u"{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* 2D NumPy arrays
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = list(tabular_data.keys())
rows = list(izip_longest(*list(tabular_data.values()))) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data.keys())
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, list(range(len(rows[0])))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [u""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=u""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a dictionary of
iterables, a two-dimensional NumPy array, or a Pandas' dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: right, center, left, decimal (only
for `numalign`).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', and 'mediawiki'.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
>>> print(tabulate([["eggs", 42], ["spam", 23]], tablefmt="mediawiki", stralign="left"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
| eggs || align="right"| 42
|-
| spam || align="right"| 23
|}
"""
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = u'\n'.join(['\t'.join(map(_text_type, headers))] + \
[u'\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_row(cells, padding, begin, sep, end):
"Return a string which represents a row of data cells."
pad = u" "*padding
padded_cells = [pad + cell + pad for cell in cells]
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_line(colwidths, padding, begin, fill, sep, end):
"Return a string which represents a horizontal line."
cells = [fill*(w + 2*padding) for w in colwidths]
return _build_row(cells, 0, begin, sep, end)
def _mediawiki_cell_attrs(row, colaligns):
"Prefix every cell in a row with an HTML alignment attribute."
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
row2 = [alignment[a] + c for c, a in zip(row, colaligns)]
return row2
def _line_segment_with_colons(linefmt, align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
fill = linefmt.hline
w = colwidth
if align in ["right", "decimal"]:
return (fill[0] * (w - 1)) + ":"
elif align == "center":
return ":" + (fill[0] * (w - 2)) + ":"
elif align == "left":
return ":" + (fill[0] * (w - 1))
else:
return fill[0] * w
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if headers else fmt.without_header_hide
pad = fmt.padding
headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow
if fmt.usehtmlattrs:
headers = _mediawiki_cell_attrs(headers, colaligns)
rows = [_mediawiki_cell_attrs(row, colaligns) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.lineabove))
if headers:
lines.append(_build_row(headers, pad, *headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
begin, fill, sep, end = fmt.linebelowheader
if fmt.usecolons:
segs = [_line_segment_with_colons(fmt.linebelowheader, a, w + 2*pad)
for w,a in zip(colwidths, colaligns)]
lines.append(_build_row(segs, 0, begin, sep, end))
else:
lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in rows[:-1]:
lines.append(_build_row(row, pad, *fmt.datarow))
lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(rows[-1], pad, *fmt.datarow))
else:
for row in rows:
lines.append(_build_row(row, pad, *fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.linebelow))
return "\n".join(lines)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import (
dynamic_search_ads_search_term_view,
)
from google.ads.googleads.v8.services.types import (
dynamic_search_ads_search_term_view_service,
)
from .base import (
DynamicSearchAdsSearchTermViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
class DynamicSearchAdsSearchTermViewServiceGrpcTransport(
DynamicSearchAdsSearchTermViewServiceTransport
):
"""gRPC backend transport for DynamicSearchAdsSearchTermViewService.
Service to fetch dynamic search ads views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_dynamic_search_ads_search_term_view(
self,
) -> Callable[
[
dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest
],
dynamic_search_ads_search_term_view.DynamicSearchAdsSearchTermView,
]:
r"""Return a callable for the get dynamic search ads search
term view method over gRPC.
Returns the requested dynamic search ads search term view in
full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetDynamicSearchAdsSearchTermViewRequest],
~.DynamicSearchAdsSearchTermView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_dynamic_search_ads_search_term_view" not in self._stubs:
self._stubs[
"get_dynamic_search_ads_search_term_view"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.DynamicSearchAdsSearchTermViewService/GetDynamicSearchAdsSearchTermView",
request_serializer=dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest.serialize,
response_deserializer=dynamic_search_ads_search_term_view.DynamicSearchAdsSearchTermView.deserialize,
)
return self._stubs["get_dynamic_search_ads_search_term_view"]
__all__ = ("DynamicSearchAdsSearchTermViewServiceGrpcTransport",)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
See Ansible Module Documentation (Below)
'''
DOCUMENTATION = '''
---
module: iptables_chain
short_description: Entirely manage a single iptables chain
description:
- Set the rules of a chain to match the specified rules.
options:
name:
description:
- Name of iptables chain
required: true
table:
description:
- One of filter, nat, mangle, raw, security. The table where the chain should exist.
required: false
default: filter
ip_version:
description:
- One of ipv4, ipv6. The IP version that the chain applies to.
required: false
default: ipv4
rules:
description:
- |-
A list of iptables rules which the chain should match. The list
should be derived from the 'iptables -S <chain>' command so that
the rules will match later rule dumps on re-runs. Otherwise, the
module will give the following error:
Chain update failed. Do input rules match 'iptables -S your_chain' output?
        The easiest way to make sure that the rules match is to create the chain
        once manually, then dump it with 'iptables -S <chain>'. Note: the rules
should not contain a '-N <chain>' rule.
required: true
author:
- "Joel Smith (joelsmith@redhat.com)"
'''
EXAMPLES = '''
# Basic iptables chain example
tasks:
- name: Create the example chain
iptables_chain:
name: example
rules:
- "-A example -p udp -m udp --dport 1025:65535 -m conntrack --ctstate NEW -j ACCEPT"
- "-A example -p tcp -m tcp --dport 1025:65535 -m conntrack --ctstate NEW -j ACCEPT"
- "-A example -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW -j ACCEPT"
- "-A example -p udp -m udp --dport 137 -m conntrack --ctstate NEW -j ACCEPT"
- "-A example -p udp -m udp --dport 138 -m conntrack --ctstate NEW -j ACCEPT"
'''
import os
import subprocess
import fcntl
import errno
import time
class IpTablesChainError(Exception):
'''All IpTablesChain methods throw this exception when errors occur'''
def __init__(self, msg, cmd, exit_code, output):
super(IpTablesChainError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
# pylint: disable=too-few-public-methods
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
class DummyContextMgr(object):
'''A dummy context manager that does nothing so that a 'with' can conditionally do nothing'''
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback_):
return False
# pylint: enable=too-few-public-methods
class IpTablesChain(object):
'''A single chain to be managed entirely'''
def __init__(self, table, iptchain, ver):
'''Create a single chain manager'''
self.table = table
self.chain = iptchain
self.ver = ver
self.restore_has_locks = None # i.e., unknown
self.wait_takes_seconds = None # i.e., unknown
def _build_cmd(self, *args):
'''
Create an iptables or ip6tables command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
retval = ["/usr/sbin/%s" % cmd, '--table', self.table]
retval.append('--wait')
if self._check_wait_takes_seconds():
retval.append('600')
retval.extend(args)
return retval
def _build_restore_cmd(self, *args):
'''
Create an iptables-restore or ip6tables-restore command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
retval = ["/usr/sbin/%s" % cmd, '--noflush', '--table', self.table]
if self._check_restore_has_locks():
retval.extend(['--wait', '600'])
retval.extend(args)
return retval
def _check_wait_takes_seconds(self):
'''Determine whether iptables -w accepts an optional timeout'''
# some versions of iptables have --wait and -w, but don't allow a timeout to be specified
if self.wait_takes_seconds is None:
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
            # try a harmless operation to see whether iptables chokes on the numeric timeout argument
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--rename-chain', 'INPUT', 'INPUT']
try:
subprocess.check_output(to_run, stderr=subprocess.STDOUT)
# we don't expect to ever get here, but if we do, then I guess it takes seconds.
self.wait_takes_seconds = True
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
self.wait_takes_seconds = True
else:
self.wait_takes_seconds = False
return self.wait_takes_seconds
def _check_restore_has_locks(self):
'''Determine whether iptables-restore has locking built in.'''
# The new version will have --wait just like iptables thanks to this patch:
# http://patchwork.ozlabs.org/patch/739234/
# Until then we'll need to do our own locking. So, this code detects whether we need to do locking
if self.restore_has_locks is None:
with open(os.devnull, 'w') as devnull:
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--noflush']
try:
subprocess.check_call(to_run, stderr=devnull, stdout=devnull)
self.restore_has_locks = True
except subprocess.CalledProcessError:
self.restore_has_locks = False
return self.restore_has_locks
def exists(self):
'''Return True if the chain exists or False otherwise'''
try:
# this is definitely going to throw. We're after the error message.
subprocess.check_output(self._build_cmd('--rename-chain', self.chain, self.chain), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
return True
if 'No chain/target/match by that name.' in ex.output:
return False
raise IpTablesChainError(msg="Failed to determine if chain exists",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def get(self):
'''Get all the rules of the chain'''
cmd = self._build_cmd('--list-rules', self.chain)
ipt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate()
if ipt.returncode != 0:
raise IpTablesChainError(msg="Failed to get existing chain rules",
cmd=cmd, exit_code=ipt.returncode, output=err)
return list([entry for entry in out.split('\n') if entry and not entry.startswith('-N ')])
def set(self, rules):
'''Set all the rules of the chain to match the passed in rules'''
in_data = "*%s\n" % self.table
# create the chain (if it didn't exist, otherwise this has no effect except to reset the counters)
in_data += ":%s - [0:0]\n" % self.chain
# flush the chain (if it did exist and had rules, otherwise this has no effect)
in_data += "-F %s\n" % self.chain
in_data += ("\n".join(rules))+"\n"
in_data += "COMMIT\n"
cmd = self._build_restore_cmd()
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
with open('/run/xtables.lock', 'a+') if not self._check_restore_has_locks() else DummyContextMgr() as fdnum:
if not self._check_restore_has_locks():
# do the locking ourselves
start = time.time()
locked = False
while time.time() < start+600:
try:
# the lock will be released automatically when the with block goes out of scope
# and the file is closed.
fcntl.flock(fdnum, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
break
except IOError as ex:
if ex.errno != errno.EDEADLK:
raise IpTablesChainError(msg="Failed to acquire iptables lock", exit_code=1)
time.sleep(0.5)
if not locked:
raise IpTablesChainError(msg="Timed out trying to acquire iptables lock", exit_code=1)
ipt = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate(in_data)
if ipt.returncode != 0:
raise IpTablesChainError(msg="Failed to set chain rules",
cmd=cmd, exit_code=ipt.returncode, output=out+"\n"+err)
def main():
'''Ansible module to entirely manage a single iptables chain'''
tables = ['filter', 'nat', 'mangle', 'raw', 'security']
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
rules=dict(required=True, type='list'),
table=dict(required=False, default='filter', choices=tables),
ip_version=dict(required=False, default='ipv4', choices=['ipv4', 'ipv6']),
),
supports_check_mode=True
)
ver = module.params['ip_version']
table = module.params['table']
name = module.params['name']
rules = module.params['rules']
iptchain = IpTablesChain(table, name, ver)
changed = False
try:
if (not iptchain.exists()) or iptchain.get() != rules:
iptchain.set(rules)
if not iptchain.exists():
module.fail_json(msg="Chain create failed")
elif iptchain.get() == rules:
changed = True
else:
module.fail_json(msg="Chain update failed. Do input rules match 'iptables -S %s' output?" % name)
except IpTablesChainError as ex:
module.fail_json(msg=ex.msg)
return module.exit_json(changed=changed, output="\n".join(rules))
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
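# Illustrative sketch only (not used by the module): mirrors how
# IpTablesChain.set() above assembles the iptables-restore payload. The chain
# name and the single rule are taken from the EXAMPLES section.
def _example_restore_payload():
    table = 'filter'
    chain = 'example'
    rules = ["-A example -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW -j ACCEPT"]
    payload = "*%s\n" % table            # select the table
    payload += ":%s - [0:0]\n" % chain   # create the chain if missing, reset counters
    payload += "-F %s\n" % chain         # flush any existing rules
    payload += "\n".join(rules) + "\n"   # the desired rules, verbatim
    payload += "COMMIT\n"
    return payload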
|
|
import time
import unittest
from ate import testcase
from ate.exception import ParamsError
class TestcaseParserUnittest(unittest.TestCase):
def test_extract_variables(self):
self.assertEqual(
testcase.extract_variables("$var"),
["var"]
)
self.assertEqual(
testcase.extract_variables("$var123"),
["var123"]
)
self.assertEqual(
testcase.extract_variables("$var_name"),
["var_name"]
)
self.assertEqual(
testcase.extract_variables("var"),
[]
)
self.assertEqual(
testcase.extract_variables("a$var"),
["var"]
)
self.assertEqual(
testcase.extract_variables("$v ar"),
["v"]
)
self.assertEqual(
testcase.extract_variables(" "),
[]
)
self.assertEqual(
testcase.extract_variables("$abc*"),
["abc"]
)
self.assertEqual(
testcase.extract_variables("${func()}"),
[]
)
self.assertEqual(
testcase.extract_variables("${func(1,2)}"),
[]
)
self.assertEqual(
testcase.extract_variables("${gen_md5($TOKEN, $data, $random)}"),
["TOKEN", "data", "random"]
)
def test_eval_content_variables(self):
variable_binds = {
"var_1": "abc",
"var_2": "def",
"var_3": 123,
"var_4": {"a": 1},
"var_5": True,
"var_6": None
}
testcase_parser = testcase.TestcaseParser(variables_binds=variable_binds)
self.assertEqual(
testcase_parser.eval_content_variables("$var_1"),
"abc"
)
self.assertEqual(
testcase_parser.eval_content_variables("var_1"),
"var_1"
)
self.assertEqual(
testcase_parser.eval_content_variables("$var_1#XYZ"),
"abc#XYZ"
)
self.assertEqual(
testcase_parser.eval_content_variables("/$var_1/$var_2/var3"),
"/abc/def/var3"
)
self.assertEqual(
testcase_parser.eval_content_variables("/$var_1/$var_2/$var_1"),
"/abc/def/abc"
)
self.assertEqual(
testcase_parser.eval_content_variables("${func($var_1, $var_2, xyz)}"),
"${func(abc, def, xyz)}"
)
self.assertEqual(
testcase_parser.eval_content_variables("$var_3"),
123
)
self.assertEqual(
testcase_parser.eval_content_variables("$var_4"),
{"a": 1}
)
self.assertEqual(
testcase_parser.eval_content_variables("$var_5"),
True
)
self.assertEqual(
testcase_parser.eval_content_variables("abc$var_5"),
"abcTrue"
)
self.assertEqual(
testcase_parser.eval_content_variables("abc$var_4"),
"abc{'a': 1}"
)
self.assertEqual(
testcase_parser.eval_content_variables("$var_6"),
None
)
def test_eval_content_variables_search_upward(self):
testcase_parser = testcase.TestcaseParser()
with self.assertRaises(ParamsError):
testcase_parser.eval_content_variables("/api/$SECRET_KEY")
testcase_parser.file_path = "tests/data/demo_testset_hardcode.yml"
content = testcase_parser.eval_content_variables("/api/$SECRET_KEY")
self.assertEqual(content, "/api/DebugTalk")
def test_parse_string_value(self):
self.assertEqual(testcase.parse_string_value("123"), 123)
self.assertEqual(testcase.parse_string_value("12.3"), 12.3)
self.assertEqual(testcase.parse_string_value("a123"), "a123")
self.assertEqual(testcase.parse_string_value("$var"), "$var")
self.assertEqual(testcase.parse_string_value("${func}"), "${func}")
def test_parse_function(self):
self.assertEqual(
testcase.parse_function("${func()}"),
{'func_name': 'func', 'args': [], 'kwargs': {}}
)
self.assertEqual(
testcase.parse_function("${func(5)}"),
{'func_name': 'func', 'args': [5], 'kwargs': {}}
)
self.assertEqual(
testcase.parse_function("${func(1, 2)}"),
{'func_name': 'func', 'args': [1, 2], 'kwargs': {}}
)
self.assertEqual(
testcase.parse_function("${func(a=1, b=2)}"),
{'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}}
)
self.assertEqual(
testcase.parse_function("${func(a= 1, b =2)}"),
{'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}}
)
self.assertEqual(
testcase.parse_function("${func(1, 2, a=3, b=4)}"),
{'func_name': 'func', 'args': [1, 2], 'kwargs': {'a': 3, 'b': 4}}
)
def test_parse_content_with_bindings_variables(self):
variables_binds = {
"str_1": "str_value1",
"str_2": "str_value2"
}
testcase_parser = testcase.TestcaseParser(variables_binds=variables_binds)
self.assertEqual(
testcase_parser.parse_content_with_bindings("$str_1"),
"str_value1"
)
self.assertEqual(
testcase_parser.parse_content_with_bindings("123$str_1/456"),
"123str_value1/456"
)
with self.assertRaises(ParamsError):
testcase_parser.parse_content_with_bindings("$str_3")
self.assertEqual(
testcase_parser.parse_content_with_bindings(["$str_1", "str3"]),
["str_value1", "str3"]
)
self.assertEqual(
testcase_parser.parse_content_with_bindings({"key": "$str_1"}),
{"key": "str_value1"}
)
def test_parse_content_with_bindings_multiple_identical_variables(self):
variables_binds = {
"userid": 100,
"data": 1498
}
testcase_parser = testcase.TestcaseParser(variables_binds=variables_binds)
content = "/users/$userid/training/$data?userId=$userid&data=$data"
self.assertEqual(
testcase_parser.parse_content_with_bindings(content),
"/users/100/training/1498?userId=100&data=1498"
)
def test_parse_variables_multiple_identical_variables(self):
variables_binds = {
"user": 100,
"userid": 1000,
"data": 1498
}
testcase_parser = testcase.TestcaseParser(variables_binds=variables_binds)
content = "/users/$user/$userid/$data?userId=$userid&data=$data"
self.assertEqual(
testcase_parser.parse_content_with_bindings(content),
"/users/100/1000/1498?userId=1000&data=1498"
)
def test_parse_content_with_bindings_functions(self):
import random, string
functions_binds = {
"gen_random_string": lambda str_len: ''.join(random.choice(string.ascii_letters + string.digits) \
for _ in range(str_len))
}
testcase_parser = testcase.TestcaseParser(functions_binds=functions_binds)
result = testcase_parser.parse_content_with_bindings("${gen_random_string(5)}")
self.assertEqual(len(result), 5)
add_two_nums = lambda a, b=1: a + b
functions_binds["add_two_nums"] = add_two_nums
self.assertEqual(
testcase_parser.parse_content_with_bindings("${add_two_nums(1)}"),
2
)
self.assertEqual(
testcase_parser.parse_content_with_bindings("${add_two_nums(1, 2)}"),
3
)
def test_extract_functions(self):
self.assertEqual(
testcase.extract_functions("${func()}"),
["${func()}"]
)
self.assertEqual(
testcase.extract_functions("${func(5)}"),
["${func(5)}"]
)
self.assertEqual(
testcase.extract_functions("${func(a=1, b=2)}"),
["${func(a=1, b=2)}"]
)
self.assertEqual(
testcase.extract_functions("${func(1, $b, c=$x, d=4)}"),
["${func(1, $b, c=$x, d=4)}"]
)
self.assertEqual(
testcase.extract_functions("/api/1000?_t=${get_timestamp()}"),
["${get_timestamp()}"]
)
self.assertEqual(
testcase.extract_functions("/api/${add(1, 2)}"),
["${add(1, 2)}"]
)
self.assertEqual(
testcase.extract_functions("/api/${add(1, 2)}?_t=${get_timestamp()}"),
["${add(1, 2)}", "${get_timestamp()}"]
)
self.assertEqual(
testcase.extract_functions("abc${func(1, 2, a=3, b=4)}def"),
["${func(1, 2, a=3, b=4)}"]
)
def test_eval_content_functions(self):
functions_binds = {
"add_two_nums": lambda a, b=1: a + b
}
testcase_parser = testcase.TestcaseParser(functions_binds=functions_binds)
self.assertEqual(
testcase_parser.eval_content_functions("${add_two_nums(1, 2)}"),
3
)
self.assertEqual(
testcase_parser.eval_content_functions("/api/${add_two_nums(1, 2)}"),
"/api/3"
)
def test_eval_content_functions_search_upward(self):
testcase_parser = testcase.TestcaseParser()
with self.assertRaises(ParamsError):
testcase_parser.eval_content_functions("/api/${gen_md5(abc)}")
testcase_parser.file_path = "tests/data/demo_testset_hardcode.yml"
content = testcase_parser.eval_content_functions("/api/${gen_md5(abc)}")
self.assertEqual(content, "/api/900150983cd24fb0d6963f7d28e17f72")
def test_parse_content_with_bindings_testcase(self):
variables_binds = {
"uid": "1000",
"random": "A2dEx",
"authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
"data": {"name": "user", "password": "123456"}
}
functions_binds = {
"add_two_nums": lambda a, b=1: a + b,
"get_timestamp": lambda: int(time.time() * 1000)
}
testcase_template = {
"url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1,2)}",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "$authorization",
"random": "$random",
"sum": "${add_two_nums(1, 2)}"
},
"body": "$data"
}
parsed_testcase = testcase.TestcaseParser(variables_binds, functions_binds)\
.parse_content_with_bindings(testcase_template)
self.assertEqual(
parsed_testcase["url"],
"http://127.0.0.1:5000/api/users/1000/3"
)
self.assertEqual(
parsed_testcase["headers"]["authorization"],
variables_binds["authorization"]
)
self.assertEqual(
parsed_testcase["headers"]["random"],
variables_binds["random"]
)
self.assertEqual(
parsed_testcase["body"],
variables_binds["data"]
)
self.assertEqual(
parsed_testcase["headers"]["sum"],
3
)
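# Illustrative sketch only (not the actual ate.testcase implementation): a
# single regular expression reproduces the variable-extraction behaviour
# asserted in test_extract_variables above -- bare "$name" tokens are
# collected, and inside "${func(...)}" expressions only the "$"-prefixed
# arguments are picked up.
def _extract_variables_sketch(content):
    import re
    return re.findall(r"\$(\w+)", content)
# e.g. _extract_variables_sketch("${gen_md5($TOKEN, $data, $random)}")
# returns ["TOKEN", "data", "random"], matching the assertions above.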
|
|
# Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt
import re
import os
import signal
import sys
import uuid
from ovs.db import error
import ovs.db.idl
import ovs.db.schema
from ovs.db import data
from ovs.db import types
import ovs.json
import ovs.jsonrpc
import ovs.ovsuuid
import ovs.poller
import ovs.stream
import ovs.util
def unbox_json(json):
if type(json) == list and len(json) == 1:
return json[0]
else:
return json
def do_default_atoms():
for type_ in types.ATOMIC_TYPES:
if type_ == types.VoidType:
continue
sys.stdout.write("%s: " % type_.to_string())
atom = data.Atom.default(type_)
if atom != data.Atom.default(type_):
sys.stdout.write("wrong\n")
sys.exit(1)
sys.stdout.write("OK\n")
def do_default_data():
any_errors = False
for n_min in 0, 1:
for key in types.ATOMIC_TYPES:
if key == types.VoidType:
continue
for value in types.ATOMIC_TYPES:
if value == types.VoidType:
valueBase = None
else:
valueBase = types.BaseType(value)
type_ = types.Type(types.BaseType(key), valueBase, n_min, 1)
assert type_.is_valid()
sys.stdout.write("key %s, value %s, n_min %d: "
% (key.to_string(), value.to_string(), n_min))
datum = data.Datum.default(type_)
if datum != data.Datum.default(type_):
sys.stdout.write("wrong\n")
any_errors = True
else:
sys.stdout.write("OK\n")
if any_errors:
sys.exit(1)
def do_parse_atomic_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
atomic_type = types.AtomicType.from_json(type_json)
print ovs.json.to_string(atomic_type.to_json(), sort_keys=True)
def do_parse_base_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
base_type = types.BaseType.from_json(type_json)
print ovs.json.to_string(base_type.to_json(), sort_keys=True)
def do_parse_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
type_ = types.Type.from_json(type_json)
print ovs.json.to_string(type_.to_json(), sort_keys=True)
def do_parse_atoms(type_string, *atom_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
base = types.BaseType.from_json(type_json)
for atom_string in atom_strings:
atom_json = unbox_json(ovs.json.from_string(atom_string))
try:
atom = data.Atom.from_json(base, atom_json)
print ovs.json.to_string(atom.to_json())
except error.Error, e:
print e.args[0].encode("utf8")
def do_parse_data(type_string, *data_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
type_ = types.Type.from_json(type_json)
for datum_string in data_strings:
datum_json = unbox_json(ovs.json.from_string(datum_string))
datum = data.Datum.from_json(type_, datum_json)
print ovs.json.to_string(datum.to_json())
def do_sort_atoms(type_string, atom_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
base = types.BaseType.from_json(type_json)
atoms = [data.Atom.from_json(base, atom_json)
for atom_json in unbox_json(ovs.json.from_string(atom_strings))]
print ovs.json.to_string([data.Atom.to_json(atom)
for atom in sorted(atoms)])
def do_parse_column(name, column_string):
column_json = unbox_json(ovs.json.from_string(column_string))
column = ovs.db.schema.ColumnSchema.from_json(column_json, name)
print ovs.json.to_string(column.to_json(), sort_keys=True)
def do_parse_table(name, table_string, default_is_root_string='false'):
default_is_root = default_is_root_string == 'true'
table_json = unbox_json(ovs.json.from_string(table_string))
table = ovs.db.schema.TableSchema.from_json(table_json, name)
print ovs.json.to_string(table.to_json(default_is_root), sort_keys=True)
def do_parse_schema(schema_string):
schema_json = unbox_json(ovs.json.from_string(schema_string))
schema = ovs.db.schema.DbSchema.from_json(schema_json)
print ovs.json.to_string(schema.to_json(), sort_keys=True)
def print_idl(idl, step):
simple = idl.tables["simple"].rows
l1 = idl.tables["link1"].rows
l2 = idl.tables["link2"].rows
n = 0
for row in simple.itervalues():
s = ("%03d: i=%s r=%s b=%s s=%s u=%s "
"ia=%s ra=%s ba=%s sa=%s ua=%s uuid=%s"
% (step, row.i, row.r, row.b, row.s, row.u,
row.ia, row.ra, row.ba, row.sa, row.ua, row.uuid))
s = re.sub('""|,|u?\'', "", s)
s = re.sub('UUID\(([^)]+)\)', r'\1', s)
s = re.sub('False', 'false', s)
s = re.sub('True', 'true', s)
s = re.sub(r'(ba)=([^[][^ ]*) ', r'\1=[\2] ', s)
print(s)
n += 1
for row in l1.itervalues():
s = ["%03d: i=%s k=" % (step, row.i)]
if row.k:
s.append(str(row.k.i))
s.append(" ka=[")
s.append(' '.join(sorted(str(ka.i) for ka in row.ka)))
s.append("] l2=")
if row.l2:
s.append(str(row.l2[0].i))
s.append(" uuid=%s" % row.uuid)
print(''.join(s))
n += 1
for row in l2.itervalues():
s = ["%03d: i=%s l1=" % (step, row.i)]
if row.l1:
s.append(str(row.l1[0].i))
s.append(" uuid=%s" % row.uuid)
print(''.join(s))
n += 1
if not n:
print("%03d: empty" % step)
sys.stdout.flush()
def substitute_uuids(json, symtab):
if type(json) in [str, unicode]:
symbol = symtab.get(json)
if symbol:
return str(symbol)
elif type(json) == list:
return [substitute_uuids(element, symtab) for element in json]
elif type(json) == dict:
d = {}
for key, value in json.iteritems():
d[key] = substitute_uuids(value, symtab)
return d
return json
def parse_uuids(json, symtab):
if type(json) in [str, unicode] and ovs.ovsuuid.is_valid_string(json):
name = "#%d#" % len(symtab)
sys.stderr.write("%s = %s\n" % (name, json))
symtab[name] = json
elif type(json) == list:
for element in json:
parse_uuids(element, symtab)
elif type(json) == dict:
for value in json.itervalues():
parse_uuids(value, symtab)
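# Note (added for clarity, not executed on its own): parse_uuids() scans each
# JSON-RPC reply and records every uuid string it finds under a generated
# "#N#" name, while substitute_uuids() rewrites those "#N#" placeholders in
# later hand-written transactions back into the recorded uuids, so test
# commands can refer to rows created by earlier transactions without knowing
# their uuids in advance.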
def idltest_find_simple(idl, i):
for row in idl.tables["simple"].rows.itervalues():
if row.i == i:
return row
return None
def idl_set(idl, commands, step):
txn = ovs.db.idl.Transaction(idl)
increment = False
for command in commands.split(','):
words = command.split()
name = words[0]
args = words[1:]
if name == "set":
if len(args) != 3:
sys.stderr.write('"set" command requires 3 arguments\n')
sys.exit(1)
s = idltest_find_simple(idl, int(args[0]))
if not s:
sys.stderr.write('"set" command asks for nonexistent i=%d\n'
% int(args[0]))
sys.exit(1)
if args[1] == "b":
s.b = args[2] == "1"
elif args[1] == "s":
s.s = args[2]
elif args[1] == "u":
s.u = uuid.UUID(args[2])
elif args[1] == "r":
s.r = float(args[2])
else:
sys.stderr.write('"set" comamnd asks for unknown column %s\n'
% args[2])
sys.stderr.exit(1)
elif name == "insert":
if len(args) != 1:
sys.stderr.write('"set" command requires 1 argument\n')
sys.exit(1)
s = txn.insert(idl.tables["simple"])
s.i = int(args[0])
elif name == "delete":
if len(args) != 1:
sys.stderr.write('"delete" command requires 1 argument\n')
sys.exit(1)
s = idltest_find_simple(idl, int(args[0]))
if not s:
sys.stderr.write('"delete" command asks for nonexistent i=%d\n'
% int(args[0]))
sys.exit(1)
s.delete()
elif name == "verify":
if len(args) != 2:
sys.stderr.write('"verify" command requires 2 arguments\n')
sys.exit(1)
s = idltest_find_simple(idl, int(args[0]))
if not s:
sys.stderr.write('"verify" command asks for nonexistent i=%d\n'
% int(args[0]))
sys.exit(1)
if args[1] in ("i", "b", "s", "u", "r"):
s.verify(args[1])
else:
sys.stderr.write('"verify" command asks for unknown column '
'"%s"\n' % args[1])
sys.exit(1)
elif name == "increment":
if len(args) != 1:
sys.stderr.write('"increment" command requires 1 argument\n')
sys.exit(1)
s = idltest_find_simple(idl, int(args[0]))
if not s:
sys.stderr.write('"set" command asks for nonexistent i=%d\n'
% int(args[0]))
sys.exit(1)
s.increment("i")
increment = True
elif name == "abort":
txn.abort()
break
elif name == "destroy":
print "%03d: destroy" % step
sys.stdout.flush()
txn.abort()
return
elif name == "linktest":
l1_0 = txn.insert(idl.tables["link1"])
l1_0.i = 1
l1_0.k = [l1_0]
l1_0.ka = [l1_0]
l1_1 = txn.insert(idl.tables["link1"])
l1_1.i = 2
l1_1.k = [l1_0]
l1_1.ka = [l1_0, l1_1]
elif name == 'getattrtest':
l1 = txn.insert(idl.tables["link1"])
i = getattr(l1, 'i', 1)
assert i == 1
l1.i = 2
i = getattr(l1, 'i', 1)
assert i == 2
l1.k = [l1]
else:
sys.stderr.write("unknown command %s\n" % name)
sys.exit(1)
status = txn.commit_block()
sys.stdout.write("%03d: commit, status=%s"
% (step, ovs.db.idl.Transaction.status_to_string(status)))
if increment and status == ovs.db.idl.Transaction.SUCCESS:
sys.stdout.write(", increment=%d" % txn.get_increment_new_value())
sys.stdout.write("\n")
sys.stdout.flush()
def do_idl(schema_file, remote, *commands):
schema_helper = ovs.db.idl.SchemaHelper(schema_file)
schema_helper.register_all()
idl = ovs.db.idl.Idl(remote, schema_helper)
if commands:
error, stream = ovs.stream.Stream.open_block(
ovs.stream.Stream.open(remote))
if error:
sys.stderr.write("failed to connect to \"%s\"" % remote)
sys.exit(1)
rpc = ovs.jsonrpc.Connection(stream)
else:
rpc = None
symtab = {}
seqno = 0
step = 0
for command in commands:
if command.startswith("+"):
# The previous transaction didn't change anything.
command = command[1:]
else:
# Wait for update.
while idl.change_seqno == seqno and not idl.run():
rpc.run()
poller = ovs.poller.Poller()
idl.wait(poller)
rpc.wait(poller)
poller.block()
print_idl(idl, step)
step += 1
seqno = idl.change_seqno
if command == "reconnect":
print("%03d: reconnect" % step)
sys.stdout.flush()
step += 1
idl.force_reconnect()
elif not command.startswith("["):
idl_set(idl, command, step)
step += 1
else:
json = ovs.json.from_string(command)
if type(json) in [str, unicode]:
sys.stderr.write("\"%s\": %s\n" % (command, json))
sys.exit(1)
json = substitute_uuids(json, symtab)
request = ovs.jsonrpc.Message.create_request("transact", json)
error, reply = rpc.transact_block(request)
if error:
sys.stderr.write("jsonrpc transaction failed: %s"
% os.strerror(error))
sys.exit(1)
elif reply.error is not None:
sys.stderr.write("jsonrpc transaction failed: %s"
% reply.error)
sys.exit(1)
sys.stdout.write("%03d: " % step)
sys.stdout.flush()
step += 1
if reply.result is not None:
parse_uuids(reply.result, symtab)
reply.id = None
sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))
sys.stdout.flush()
if rpc:
rpc.close()
while idl.change_seqno == seqno and not idl.run():
poller = ovs.poller.Poller()
idl.wait(poller)
poller.block()
print_idl(idl, step)
step += 1
idl.close()
print("%03d: done" % step)
def usage():
print """\
%(program_name)s: test utility for Open vSwitch database Python bindings
usage: %(program_name)s [OPTIONS] COMMAND ARG...
The following commands are supported:
default-atoms
test ovsdb_atom_default()
default-data
test ovsdb_datum_default()
parse-atomic-type TYPE
parse TYPE as OVSDB atomic type, and re-serialize
parse-base-type TYPE
parse TYPE as OVSDB base type, and re-serialize
parse-type JSON
parse JSON as OVSDB type, and re-serialize
parse-atoms TYPE ATOM...
parse JSON ATOMs as atoms of TYPE, and re-serialize
parse-atom-strings TYPE ATOM...
parse string ATOMs as atoms of given TYPE, and re-serialize
sort-atoms TYPE ATOM...
print JSON ATOMs in sorted order
parse-data TYPE DATUM...
parse JSON DATUMs as data of given TYPE, and re-serialize
parse-column NAME OBJECT
parse column NAME with info OBJECT, and re-serialize
parse-table NAME OBJECT [DEFAULT-IS-ROOT]
parse table NAME with info OBJECT
parse-schema JSON
parse JSON as an OVSDB schema, and re-serialize
idl SCHEMA SERVER [TRANSACTION...]
connect to SERVER (which has the specified SCHEMA) and dump the
contents of the database as seen initially by the IDL implementation
and after executing each TRANSACTION. (Each TRANSACTION must modify
the database or this command will hang.)
The following options are also available:
-t, --timeout=SECS give up after SECS seconds
-h, --help display this help message\
""" % {'program_name': ovs.util.PROGRAM_NAME}
sys.exit(0)
def main(argv):
try:
options, args = getopt.gnu_getopt(argv[1:], 't:h',
                                          ['timeout=',
                                           'help'])
except getopt.GetoptError, geo:
sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
sys.exit(1)
for key, value in options:
if key in ['-h', '--help']:
usage()
elif key in ['-t', '--timeout']:
try:
timeout = int(value)
if timeout < 1:
raise TypeError
            except (TypeError, ValueError):
raise error.Error("value %s on -t or --timeout is not at "
"least 1" % value)
signal.alarm(timeout)
else:
sys.exit(0)
if not args:
sys.stderr.write("%s: missing command argument "
"(use --help for help)\n" % ovs.util.PROGRAM_NAME)
sys.exit(1)
commands = {"default-atoms": (do_default_atoms, 0),
"default-data": (do_default_data, 0),
"parse-atomic-type": (do_parse_atomic_type, 1),
"parse-base-type": (do_parse_base_type, 1),
"parse-type": (do_parse_type, 1),
"parse-atoms": (do_parse_atoms, (2,)),
"parse-data": (do_parse_data, (2,)),
"sort-atoms": (do_sort_atoms, 2),
"parse-column": (do_parse_column, 2),
"parse-table": (do_parse_table, (2, 3)),
"parse-schema": (do_parse_schema, 1),
"idl": (do_idl, (2,))}
command_name = args[0]
args = args[1:]
if not command_name in commands:
sys.stderr.write("%s: unknown command \"%s\" "
"(use --help for help)\n" % (ovs.util.PROGRAM_NAME,
command_name))
sys.exit(1)
func, n_args = commands[command_name]
if type(n_args) == tuple:
if len(args) < n_args[0]:
sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
"only %d provided\n"
% (ovs.util.PROGRAM_NAME, command_name,
                                n_args[0], len(args)))
sys.exit(1)
elif type(n_args) == int:
if len(args) != n_args:
sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
"provided\n"
% (ovs.util.PROGRAM_NAME, command_name,
n_args, len(args)))
sys.exit(1)
else:
assert False
func(*args)
if __name__ == '__main__':
try:
main(sys.argv)
except error.Error, e:
sys.stderr.write("%s\n" % e)
sys.exit(1)
|
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_almost_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import resolve_url
from django.test import TestCase
from django.test.utils import override_settings
from django_otp import DEVICE_ID_SESSION_KEY
from django_otp.oath import totp
from django_otp.util import random_hex
from .utils import UserMixin
try:
from unittest import mock
except ImportError:
import mock
class LoginTest(UserMixin, TestCase):
def _post(self, data=None):
return self.client.post(reverse('two_factor:login'), data=data)
def test_form(self):
response = self.client.get(reverse('two_factor:login'))
self.assertContains(response, 'Password:')
def test_invalid_login(self):
response = self._post({'auth-username': 'unknown',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Please enter a correct')
self.assertContains(response, 'and password.')
@mock.patch('two_factor.views.core.signals.user_verified.send')
def test_valid_login(self, mock_signal):
self.create_user()
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
# No signal should be fired for non-verified user logins.
self.assertFalse(mock_signal.called)
def test_valid_login_with_custom_redirect(self):
redirect_url = reverse('two_factor:setup')
self.create_user()
response = self.client.post(
'%s?%s' % (reverse('two_factor:login'), 'next=' + redirect_url),
{'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertRedirects(response, redirect_url)
def test_valid_login_with_redirect_field_name(self):
redirect_url = reverse('two_factor:setup')
self.create_user()
response = self.client.post(
'%s?%s' % (reverse('custom-login'), 'next_page=' + redirect_url),
{'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertRedirects(response, redirect_url)
@mock.patch('two_factor.views.core.signals.user_verified.send')
def test_with_generator(self, mock_signal):
user = self.create_user()
device = user.totpdevice_set.create(name='default',
key=random_hex().decode())
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Token:')
response = self._post({'token-otp_token': '123456',
'login_view-current_step': 'token'})
self.assertEqual(response.context_data['wizard']['form'].errors,
{'__all__': ['Invalid token. Please make sure you '
'have entered it correctly.']})
response = self._post({'token-otp_token': totp(device.bin_key),
'login_view-current_step': 'token'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
self.assertEqual(device.persistent_id,
self.client.session.get(DEVICE_ID_SESSION_KEY))
# Check that the signal was fired.
mock_signal.assert_called_with(sender=mock.ANY, request=mock.ANY, user=user, device=device)
@mock.patch('two_factor.gateways.fake.Fake')
@mock.patch('two_factor.views.core.signals.user_verified.send')
@override_settings(
TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
)
def test_with_backup_phone(self, mock_signal, fake):
user = self.create_user()
for no_digits in (6, 8):
with self.settings(TWO_FACTOR_TOTP_DIGITS=no_digits):
user.totpdevice_set.create(name='default', key=random_hex().decode(),
digits=no_digits)
device = user.phonedevice_set.create(name='backup', number='+31101234567',
method='sms',
key=random_hex().decode())
# Backup phones should be listed on the login form
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Send text message to +31 ** *** **67')
# Ask for challenge on invalid device
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'challenge_device': 'MALICIOUS/INPUT/666'})
self.assertContains(response, 'Send text message to +31 ** *** **67')
# Ask for SMS challenge
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'challenge_device': device.persistent_id})
self.assertContains(response, 'We sent you a text message')
fake.return_value.send_sms.assert_called_with(
device=device,
token=str(totp(device.bin_key, digits=no_digits)).zfill(no_digits))
# Ask for phone challenge
device.method = 'call'
device.save()
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'challenge_device': device.persistent_id})
self.assertContains(response, 'We are calling your phone right now')
fake.return_value.make_call.assert_called_with(
device=device,
token=str(totp(device.bin_key, digits=no_digits)).zfill(no_digits))
# Valid token should be accepted.
response = self._post({'token-otp_token': totp(device.bin_key),
'login_view-current_step': 'token'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
self.assertEqual(device.persistent_id,
self.client.session.get(DEVICE_ID_SESSION_KEY))
# Check that the signal was fired.
mock_signal.assert_called_with(sender=mock.ANY, request=mock.ANY, user=user, device=device)
@mock.patch('two_factor.views.core.signals.user_verified.send')
def test_with_backup_token(self, mock_signal):
user = self.create_user()
user.totpdevice_set.create(name='default', key=random_hex().decode())
device = user.staticdevice_set.create(name='backup')
device.token_set.create(token='abcdef123')
# Backup phones should be listed on the login form
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Backup Token')
# Should be able to go to backup tokens step in wizard
response = self._post({'wizard_goto_step': 'backup'})
self.assertContains(response, 'backup tokens')
# Wrong codes should not be accepted
response = self._post({'backup-otp_token': 'WRONG',
'login_view-current_step': 'backup'})
self.assertEqual(response.context_data['wizard']['form'].errors,
{'__all__': ['Invalid token. Please make sure you '
'have entered it correctly.']})
# Valid token should be accepted.
response = self._post({'backup-otp_token': 'abcdef123',
'login_view-current_step': 'backup'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
# Check that the signal was fired.
mock_signal.assert_called_with(sender=mock.ANY, request=mock.ANY, user=user, device=device)
@mock.patch('two_factor.views.utils.logger')
def test_change_password_in_between(self, mock_logger):
"""
When the password of the user is changed while trying to login, should
not result in errors. Refs #63.
"""
user = self.create_user()
self.enable_otp()
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Token:')
# Now, the password is changed. When the form is submitted, the
# credentials should be checked again. If that's the case, the
# login form should note that the credentials are invalid.
user.set_password('secret2')
user.save()
response = self._post({'login_view-current_step': 'token'})
self.assertContains(response, 'Please enter a correct')
self.assertContains(response, 'and password.')
# Check that a message was logged.
mock_logger.warning.assert_called_with(
"Current step '%s' is no longer valid, returning to last valid "
"step in the wizard.",
'token')
@mock.patch('two_factor.views.utils.logger')
def test_reset_wizard_state(self, mock_logger):
self.create_user()
self.enable_otp()
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertContains(response, 'Token:')
# A GET request resets the state of the wizard...
self.client.get(reverse('two_factor:login'))
# ...so there is no user in this request anymore. As the login flow
# depends on a user being present, this should be handled gracefully.
response = self._post({'token-otp_token': '123456',
'login_view-current_step': 'token'})
self.assertContains(response, 'Password:')
# Check that a message was logged.
mock_logger.warning.assert_called_with(
"Requested step '%s' is no longer valid, returning to last valid "
"step in the wizard.",
'token')
@mock.patch('two_factor.views.utils.logger')
def test_login_different_user_on_existing_session(self, mock_logger):
"""
This test reproduces the issue where a user is logged in and a different user
attempts to login.
"""
self.create_user()
self.create_user(username='vedran@example.com')
response = self._post({'auth-username': 'bouke@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
response = self._post({'auth-username': 'vedran@example.com',
'auth-password': 'secret',
'login_view-current_step': 'auth'})
self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
class BackupTokensTest(UserMixin, TestCase):
def setUp(self):
super(BackupTokensTest, self).setUp()
self.create_user()
self.enable_otp()
self.login_user()
def test_empty(self):
response = self.client.get(reverse('two_factor:backup_tokens'))
self.assertContains(response, 'You don\'t have any backup codes yet.')
def test_generate(self):
url = reverse('two_factor:backup_tokens')
response = self.client.post(url)
self.assertRedirects(response, url)
response = self.client.get(url)
first_set = set([token.token for token in
response.context_data['device'].token_set.all()])
self.assertNotContains(response, 'You don\'t have any backup codes '
'yet.')
self.assertEqual(10, len(first_set))
# Generating the tokens should give a fresh set
self.client.post(url)
response = self.client.get(url)
second_set = set([token.token for token in
response.context_data['device'].token_set.all()])
self.assertNotEqual(first_set, second_set)
|
|
# -*- coding: utf-8 -*-
import os
import csv
import fudge
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from compat.tests.helpers import patch_identifier_index, restore_patched
from editor.models import Source, Category, Format
from editor.tests.factories import SourceFactory, CategoryFactory, FormatFactory
class LoadSourcesTest(TestCase):
@classmethod
    def setUpClass(cls):
        super(LoadSourcesTest, cls).setUpClass()
        # cache sources.csv rows for all tests
sources = os.path.join(
settings.BASE_DIR, '../', 'editor', 'data', 'sources.csv')
cls._csv_rows = []
with open(sources) as csvfile:
reader = csv.DictReader(csvfile)
for node in reader:
cls._csv_rows.append(node)
cls._csv_id_to_name_map = {}
for row in cls._csv_rows:
cls._csv_id_to_name_map[row['id']] = row['name']
def test_raises_command_error_if_sources_exist(self):
SourceFactory()
self.assertRaises(CommandError, call_command, 'load_sources', verbosity=0)
def _assert_all_nodes_imported(self):
missed = []
for row in self._csv_rows:
qs = Source.objects.filter(name=row['name'])
if not qs.exists():
missed.append(row['name'])
self.assertFalse(bool(missed), '%s missed' % missed)
self.assertEqual(
Source.objects.all().count(),
len(self._csv_rows))
for node in self._csv_rows:
node_instance = Source.objects.get(
name=node['name'],
abbreviation=node['abbreviation'],
domain=node['domain'],
homepage=node['homepage'],
about=node['about'])
if node['parent_id']:
self.assertEquals(
node_instance.parent.name,
self._csv_id_to_name_map[node['parent_id']])
else:
# root node
self.assertIsNone(node_instance.parent)
def test_deletes_existing_and_creates_new_nodes(self):
s1 = SourceFactory()
s2 = SourceFactory()
self.assertEqual(Source.objects.all().count(), 2)
call_command('load_sources', verbosity=0, delete=True)
self.assertEquals(Source.objects.filter(name=s1.name).count(), 0)
self.assertEquals(Source.objects.filter(name=s2.name).count(), 0)
self._assert_all_nodes_imported()
def test_saves_all_nodes(self):
self.assertEqual(Source.objects.all().count(), 0)
call_command('load_sources', verbosity=0)
self._assert_all_nodes_imported()
class CreateRootsTest(TestCase):
def _assert_creates_root(self, model_class):
call_command('create_roots', verbosity=0)
qs = model_class.objects.filter(name='!ROOT!')
self.assertEquals(qs.count(), 1)
self.assertIsNone(qs[0].parent)
def _assert_changes_existing_roots(self, model_class):
if model_class == Source:
model_factory = SourceFactory
elif model_class == Category:
model_factory = CategoryFactory
elif model_class == Format:
model_factory = FormatFactory
else:
raise Exception('Do not know the factory of the %s model' % model_class)
# create some root instances
instance1 = model_factory(parent=None)
instance2 = model_factory(parent=None)
instance3 = model_factory(parent=None)
        self.assertEqual(model_class.objects.filter(parent__isnull=True).count(), 3)
call_command('create_roots', verbosity=0)
# now testing
qs = model_class.objects.filter(parent__isnull=True)
self.assertEquals(qs.count(), 1)
self.assertEquals(qs[0].name, '!ROOT!')
# all old roots moved to new root children
self.assertIsNotNone(model_class.objects.get(id=instance1.id).parent)
self.assertEquals(
model_class.objects.get(id=instance1.id).parent.name,
'!ROOT!')
self.assertIsNotNone(model_class.objects.get(id=instance2.id).parent)
self.assertEquals(
model_class.objects.get(id=instance2.id).parent.name,
'!ROOT!')
self.assertIsNotNone(model_class.objects.get(id=instance3.id).parent)
self.assertEquals(
model_class.objects.get(id=instance3.id).parent.name,
'!ROOT!')
def test_creates_source_root(self):
self._assert_creates_root(Source)
def test_creates_format_root(self):
self._assert_creates_root(Format)
def test_creates_category_root(self):
self._assert_creates_root(Category)
def test_changes_existing_source_roots(self):
self._assert_changes_existing_roots(Source)
def test_changes_existing_category_roots(self):
self._assert_changes_existing_roots(Category)
def test_changes_existing_format_roots(self):
self._assert_changes_existing_roots(Format)
def test_does_not_create_source_root_twice(self):
call_command('create_roots', verbosity=0)
qs = Source.objects.filter(name='!ROOT!')
self.assertEquals(qs.count(), 1)
self.assertIsNone(qs[0].parent)
call_command('create_roots', verbosity=0)
qs = Source.objects.filter(name='!ROOT!')
self.assertEquals(qs.count(), 1)
self.assertIsNone(qs[0].parent)
class SetupAmbrySearchTest(TestCase):
# helpers
def _patch_identifier_index(self, result):
""" Patches ambry search identifier to return given result. """
patch_identifier_index(result)
def _restore_patched(self):
restore_patched()
@fudge.patch('ambry._meta')
def test_raises_commanderror_if_old_version_of_ambry_found(self, fake_meta):
fake_meta.has_attr(__version__='0.3.704')
        try:
            call_command('setup_ambry_search', verbosity=0)
        except CommandError as exc:
            self.assertIn('ambry >= 0.3.705', exc.message)
        else:
            self.fail('CommandError was not raised for the old ambry version.')
@fudge.patch(
'ambry._meta',
'editor.management.commands.setup_ambry_search.call')
def test_sets_up_search_system(self, fake_meta, fake_call):
fake_meta.has_attr(__version__='0.3.705')
fake_call.expects_call()\
.with_args(['ambry', 'config', 'install'])\
.next_call()\
.with_args(['ambry', 'sync'])\
.next_call()\
.with_args(['ambry', 'search', '-R'])
call_command('setup_ambry_search', verbosity=0)
@fudge.patch(
'ambry._meta',
'editor.management.commands.setup_ambry_search.call')
def test_raises_exception_if_search_failed(self, fake_meta, fake_call):
fake_meta.has_attr(__version__='0.3.705')
fake_call.expects_call()
        # fudge cannot patch the identifier index through the ambry package
        # because ambry/__init__.py exposes a library() function, so the
        # helper patches it directly here.
search_result = []
self._patch_identifier_index(search_result)
        try:
            call_command('setup_ambry_search', verbosity=0)
        except CommandError as exc:
            self.assertIn('I couldn\'t find California term', exc.message)
        else:
            self.fail('CommandError was not raised for the failed search.')
        finally:
            self._restore_patched()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
if np_type in (np.float32, np.float64):
a = x.real.astype(np_type)
b = y.real.astype(np_type)
a_np = np.transpose(a) if adjoint else a
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
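        # In eager mode the set below collapses to {False}, so the
        # placeholder path is only exercised when building a graph.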
for use_placeholder in set((False, not context.executing_eagerly())):
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
with self.cached_session() as sess:
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testNonSquareMatrix(self):
    # Attempting to solve with a non-square matrix should return an error.
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
self.evaluate(linalg_ops.matrix_solve(matrix, matrix))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(linalg_ops.matrix_solve(matrix, rhs))
# The matrix and right-hand side should have the same batch dimensions
matrix = np.random.normal(size=(2, 6, 2, 2))
rhs = np.random.normal(size=(2, 3, 2, 2))
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(linalg_ops.matrix_solve(matrix, rhs))
def testNotInvertible(self):
# The input should be invertible.
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
self.evaluate(linalg_ops.matrix_solve(matrix, matrix))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testConcurrent(self):
seed = [42, 24]
matrix_shape = [3, 3]
all_ops = []
for adjoint_ in False, True:
lhs1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
lhs2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
rhs1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
rhs2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(0, len(all_ops), 2):
self.assertAllEqual(val[i], val[i + 1])
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
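    # Identity plus a small constant gives a strictly diagonally dominant
    # matrix, so the benchmark never has to solve a singular system.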
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
|
from django.contrib.contenttypes.models import ContentType
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.access.tests import permission
from kitsune.forums.models import Post
from kitsune.forums.tests import (
ForumTestCase, forum, thread, post as forum_post)
from kitsune.sumo.tests import get, post
from kitsune.users.tests import user, group
class PostsTemplateTests(ForumTestCase):
def test_empty_reply_errors(self):
"""Posting an empty reply shows errors."""
u = user(save=True)
t = forum_post(save=True).thread
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.reply', {'content': ''},
args=[t.forum.slug, t.id])
doc = pq(response.content)
error_msg = doc('ul.errorlist li a')[0]
eq_(error_msg.text, 'Please provide a message.')
def test_edit_post_errors(self):
"""Changing post content works."""
p = forum_post(save=True)
t = p.thread
u = p.author
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.edit_post',
{'content': 'wha?'}, args=[t.forum.slug, t.id, p.id])
doc = pq(response.content)
errors = doc('ul.errorlist li a')
eq_(errors[0].text,
'Your message is too short (4 characters). ' +
'It must be at least 5 characters.')
def test_edit_thread_template(self):
"""The edit-post template should render."""
p = forum_post(save=True)
u = p.author
self.client.login(username=u.username, password='testpass')
res = get(self.client, 'forums.edit_post',
args=[p.thread.forum.slug, p.thread.id, p.id])
doc = pq(res.content)
eq_(len(doc('form.edit-post')), 1)
def test_edit_post(self):
"""Changing post content works."""
p = forum_post(save=True)
t = p.thread
u = p.author
self.client.login(username=u.username, password='testpass')
post(self.client, 'forums.edit_post', {'content': 'Some new content'},
args=[t.forum.slug, t.id, p.id])
edited_p = Post.objects.get(id=p.id)
eq_('Some new content', edited_p.content)
def test_posts_fr(self):
"""Posts render for [fr] locale."""
t = forum_post(save=True).thread
response = get(self.client, 'forums.posts', args=[t.forum.slug, t.id],
locale='fr')
eq_(200, response.status_code)
eq_('/forums/{f}/{t}'.format(f=t.forum.slug, t=t.id),
pq(response.content)('link[rel="canonical"]')[0].attrib['href'])
def test_long_title_truncated_in_crumbs(self):
"""A very long thread title gets truncated in the breadcrumbs"""
t = thread(title='A thread with a very very very very long title', save=True)
forum_post(thread=t, save=True)
response = get(self.client, 'forums.posts', args=[t.forum.slug, t.id])
doc = pq(response.content)
crumb = doc('#breadcrumbs li:last-child')
eq_(crumb.text(), 'A thread with a very very very very ...')
def test_edit_post_moderator(self):
"""Editing post as a moderator works."""
p = forum_post(save=True)
t = p.thread
f = t.forum
# Create the moderator group, give it the edit permission
# and add a moderator.
moderator_group = group(save=True)
ct = ContentType.objects.get_for_model(f)
permission(codename='forums_forum.post_edit_forum', content_type=ct,
object_id=f.id, group=moderator_group, save=True)
moderator = user(save=True)
moderator_group.user_set.add(moderator)
self.client.login(username=moderator.username, password='testpass')
r = post(self.client, 'forums.edit_post',
{'content': 'More new content'}, args=[f.slug, t.id, p.id])
eq_(200, r.status_code)
edited_p = Post.objects.get(pk=p.pk)
eq_('More new content', edited_p.content)
def test_preview_reply(self):
"""Preview a reply."""
t = forum_post(save=True).thread
u = t.creator
content = 'Full of awesome.'
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.reply',
{'content': content, 'preview': 'any string'},
args=[t.forum.slug, t.id])
eq_(200, response.status_code)
doc = pq(response.content)
eq_(content, doc('#post-preview div.content').text())
eq_(1, t.post_set.count())
def test_watch_thread(self):
"""Watch and unwatch a thread."""
t = forum_post(save=True).thread
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.watch_thread', {'watch': 'yes'},
args=[t.forum.slug, t.id])
self.assertContains(response, 'Stop watching this thread')
response = post(self.client, 'forums.watch_thread', {'watch': 'no'},
args=[t.forum.slug, t.id])
self.assertNotContains(response, 'Stop watching this thread')
def test_show_reply_fields(self):
"""Reply fields show if user has permission to post."""
t = forum_post(save=True).thread
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = get(self.client, 'forums.posts', args=[t.forum.slug, t.pk])
self.assertContains(response, 'thread-reply')
def test_restricted_hide_reply(self):
"""Reply fields don't show if user has no permission to post."""
t = forum_post(save=True).thread
f = t.forum
ct = ContentType.objects.get_for_model(f)
# If the forum has the permission and the user isn't assigned said
# permission, then they can't post.
permission(codename='forums_forum.post_in_forum', content_type=ct,
object_id=f.id, save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = get(self.client, 'forums.posts', args=[f.slug, t.pk])
self.assertNotContains(response, 'thread-reply')
def test_links_nofollow(self):
"""Links posted should have rel=nofollow."""
p = forum_post(content='linking http://test.org', save=True)
t = p.thread
f = t.forum
response = get(self.client, 'forums.posts', args=[f.slug, t.pk])
doc = pq(response.content)
eq_('nofollow', doc('ol.posts div.content a')[0].attrib['rel'])
def test_num_replies(self):
"""Verify the number of replies label."""
t = forum_post(save=True).thread
response = get(self.client, 'forums.posts', args=[t.forum.slug, t.id])
eq_(200, response.status_code)
assert '0 Replies' in response.content
forum_post(thread=t, save=True)
forum_post(thread=t, save=True)
response = get(self.client, 'forums.posts', args=[t.forum.slug, t.id])
eq_(200, response.status_code)
assert '2 Replies' in response.content
def test_youtube_in_post(self):
"""Verify youtube video embedding."""
u = user(save=True)
t = forum_post(save=True).thread
self.client.login(username=u.username, password='testpass')
response = post(
self.client,
'forums.reply',
{'content': '[[V:http://www.youtube.com/watch?v=oHg5SJYRHA0]]'},
args=[t.forum.slug, t.id])
doc = pq(response.content)
assert doc('iframe')[0].attrib['src'].startswith(
'//www.youtube.com/embed/oHg5SJYRHA0')
class ThreadsTemplateTests(ForumTestCase):
def test_last_thread_post_link_has_post_id(self):
"""Make sure the last post url links to the last post (#post-<id>).
"""
t = forum_post(save=True).thread
last = forum_post(thread=t, save=True)
response = get(self.client, 'forums.threads', args=[t.forum.slug])
doc = pq(response.content)
last_post_link = doc('ol.threads div.last-post a:not(.username)')[0]
href = last_post_link.attrib['href']
eq_(href.split('#')[1], 'post-%s' % last.id)
def test_empty_thread_errors(self):
"""Posting an empty thread shows errors."""
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.new_thread',
{'title': '', 'content': ''}, args=[f.slug])
doc = pq(response.content)
errors = doc('ul.errorlist li a')
eq_(errors[0].text, 'Please provide a title.')
eq_(errors[1].text, 'Please provide a message.')
def test_new_short_thread_errors(self):
"""Posting a short new thread shows errors."""
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.new_thread',
{'title': 'wha?', 'content': 'wha?'}, args=[f.slug])
doc = pq(response.content)
errors = doc('ul.errorlist li a')
eq_(errors[0].text,
'Your title is too short (4 characters). ' +
'It must be at least 5 characters.')
eq_(errors[1].text,
'Your message is too short (4 characters). ' +
'It must be at least 5 characters.')
def test_edit_thread_errors(self):
"""Editing thread with too short of a title shows errors."""
t = forum_post(save=True).thread
creator = t.creator
self.client.login(username=creator.username, password='testpass')
response = post(self.client, 'forums.edit_thread',
{'title': 'wha?'}, args=[t.forum.slug, t.id])
doc = pq(response.content)
errors = doc('ul.errorlist li a')
eq_(errors[0].text,
'Your title is too short (4 characters). ' +
'It must be at least 5 characters.')
def test_edit_thread_template(self):
"""The edit-thread template should render."""
t = forum_post(save=True).thread
creator = t.creator
self.client.login(username=creator.username, password='testpass')
res = get(self.client, 'forums.edit_thread',
args=[t.forum.slug, t.id])
doc = pq(res.content)
eq_(len(doc('form.edit-thread')), 1)
def test_watch_forum(self):
"""Watch and unwatch a forum."""
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = post(self.client, 'forums.watch_forum', {'watch': 'yes'},
args=[f.slug])
self.assertContains(response, 'Stop watching this forum')
response = post(self.client, 'forums.watch_forum', {'watch': 'no'},
args=[f.slug])
self.assertNotContains(response, 'Stop watching this forum')
def test_canonical_url(self):
"""Verify the canonical URL is set correctly."""
f = forum(save=True)
response = get(self.client, 'forums.threads', args=[f.slug])
eq_('/forums/%s' % f.slug,
pq(response.content)('link[rel="canonical"]')[0].attrib['href'])
def test_show_new_thread(self):
"""'Post new thread' shows if user has permission to post."""
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = get(self.client, 'forums.threads', args=[f.slug])
self.assertContains(response, 'Post a new thread')
def test_restricted_hide_new_thread(self):
"""'Post new thread' doesn't show if user has no permission to post.
"""
f = forum(save=True)
ct = ContentType.objects.get_for_model(f)
# If the forum has the permission and the user isn't assigned said
# permission, then they can't post.
permission(codename='forums_forum.post_in_forum', content_type=ct,
object_id=f.id, save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
response = get(self.client, 'forums.threads', args=[f.slug])
self.assertNotContains(response, 'Post a new thread')
class ForumsTemplateTests(ForumTestCase):
def test_last_post_link_has_post_id(self):
"""Make sure the last post url links to the last post (#post-<id>).
"""
p = forum_post(save=True)
response = get(self.client, 'forums.forums')
doc = pq(response.content)
last_post_link = doc('ol.forums div.last-post a:not(.username)')[0]
href = last_post_link.attrib['href']
eq_(href.split('#')[1], 'post-%s' % p.id)
def test_restricted_is_invisible(self):
"""Forums with restricted view_in permission shouldn't show up."""
restricted_forum = forum(save=True)
# Make it restricted.
ct = ContentType.objects.get_for_model(restricted_forum)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=restricted_forum.id, save=True)
response = get(self.client, 'forums.forums')
self.assertNotContains(response, restricted_forum.slug)
def test_canonical_url(self):
response = get(self.client, 'forums.forums')
eq_('/forums',
pq(response.content)('link[rel="canonical"]')[0].attrib['href'])
def test_display_order(self):
"""Verify the display_order is respected."""
forum1 = forum(display_order=1, save=True)
forum2 = forum(display_order=2, save=True)
# forum1 should be listed first
r = get(self.client, 'forums.forums')
eq_(200, r.status_code)
doc = pq(r.content)
eq_(forum1.name, doc('ol.forums > li a:first').text())
forum1.display_order = 3
forum1.save()
# forum2 should be listed first
r = get(self.client, 'forums.forums')
eq_(200, r.status_code)
doc = pq(r.content)
eq_(forum2.name, doc('ol.forums > li a:first').text())
def test_is_listed(self):
"""Verify is_listed is respected."""
forum1 = forum(is_listed=True, save=True)
forum2 = forum(is_listed=True, save=True)
# Both forums should be listed.
r = get(self.client, 'forums.forums')
eq_(200, r.status_code)
doc = pq(r.content)
eq_(2, len(doc('ol.forums > li')))
forum1.is_listed = False
forum1.save()
# Only forum2 should be listed.
r = get(self.client, 'forums.forums')
eq_(200, r.status_code)
doc = pq(r.content)
eq_(1, len(doc('ol.forums > li')))
eq_(forum2.name, doc('ol.forums > li a').text())
class NewThreadTemplateTests(ForumTestCase):
def test_preview(self):
"""Preview the thread post."""
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
content = 'Full of awesome.'
response = post(self.client, 'forums.new_thread',
{'title': 'Topic', 'content': content,
'preview': 'any string'}, args=[f.slug])
eq_(200, response.status_code)
doc = pq(response.content)
eq_(content, doc('#post-preview div.content').text())
eq_(0, f.thread_set.count()) # No thread was created.
|
|
import json
import django
from django.contrib.admin import ModelAdmin, SimpleListFilter
from django.contrib import messages
from django.conf.urls import url
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.template import loader, Context
def make_published(modeladmin, request, queryset):
for row in queryset.all():
row.publish()
make_published.short_description = _('Publish')
def make_unpublished(modeladmin, request, queryset):
for row in queryset.all():
row.unpublish()
make_unpublished.short_description = _('Unpublish')
def http_json_response(data):
return HttpResponse(json.dumps(data), content_type='application/json')
class PublisherForm(forms.ModelForm):
def clean(self):
data = super(PublisherForm, self).clean()
cleaned_data = self.cleaned_data
instance = self.instance
# work out which fields are unique_together
unique_fields_set = instance.get_unique_together()
if not unique_fields_set:
return data
for unique_fields in unique_fields_set:
unique_filter = {}
for unique_field in unique_fields:
field = instance.get_field(unique_field)
# Get value from the form or the model
if field.editable:
unique_filter[unique_field] = cleaned_data[unique_field]
else:
unique_filter[unique_field] = getattr(instance, unique_field)
            # Check whether any rows with the same unique values already exist:
            # fetch all matches, then exclude the current instance itself.
existing_instances = type(instance).objects \
.filter(**unique_filter) \
.exclude(pk=instance.pk)
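            # The draft's published counterpart legitimately shares these
            # values, so it must not trigger a false uniqueness error.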
if instance.publisher_linked:
existing_instances = existing_instances.exclude(pk=instance.publisher_linked.pk)
if existing_instances:
for unique_field in unique_fields:
self._errors[unique_field] = self.error_class(
[_('This value must be unique.')])
return data
class PublisherAdmin(ModelAdmin):
form = PublisherForm
change_form_template = 'publisher/change_form.html'
    # The publish/unpublish actions sometimes make plugins disappear from the
    # page, so they are disabled for now until this can be investigated further.
# actions = (make_published, make_unpublished, )
list_display = ('publisher_object_title', 'publisher_publish', 'publisher_status', )
url_name_prefix = None
class Media:
js = (
'publisher/publisher.js',
)
css = {
'all': ('publisher/publisher.css', ),
}
def __init__(self, model, admin_site):
super(PublisherAdmin, self).__init__(model, admin_site)
self.request = None
self.url_name_prefix = '%(app_label)s_%(module_name)s_' % {
'app_label': self.model._meta.app_label,
'module_name': self.model._meta.model_name,
}
# Reverse URL strings used in multiple places..
self.publish_reverse = '%s:%spublish' % (
self.admin_site.name,
self.url_name_prefix, )
self.unpublish_reverse = '%s:%sunpublish' % (
self.admin_site.name,
self.url_name_prefix, )
self.revert_reverse = '%s:%srevert' % (
self.admin_site.name,
self.url_name_prefix, )
self.changelist_reverse = '%s:%schangelist' % (
self.admin_site.name,
self.url_name_prefix, )
def has_publish_permission(self, request, obj=None):
opts = self.opts
return request.user.has_perm('%s.can_publish' % opts.app_label)
def publisher_object_title(self, obj):
return u'%s' % obj
publisher_object_title.short_description = 'Title'
def publisher_status(self, obj):
if not self.has_publish_permission(self.request, obj):
return ''
template_name = 'publisher/change_list_publish_status.html'
publish_btn = None
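        # Only offer a publish link when the draft has pending changes
        # (is_dirty).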
if obj.is_dirty:
publish_btn = reverse(self.publish_reverse, args=(obj.pk, ))
t = loader.get_template(template_name)
c = Context({
'publish_btn': publish_btn,
})
if django.VERSION >= (1, 10):
return t.render(c.flatten())
else:
return t.render(c)
publisher_status.short_description = 'Last Changes'
publisher_status.allow_tags = True
def publisher_publish(self, obj):
template_name = 'publisher/change_list_publish.html'
is_published = False
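        # A draft counts as published when it has a linked public copy.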
if obj.publisher_linked and obj.is_draft:
is_published = True
t = loader.get_template(template_name)
c = Context({
'object': obj,
'is_published': is_published,
'has_publish_permission': self.has_publish_permission(self.request, obj),
'publish_url': reverse(self.publish_reverse, args=(obj.pk, )),
'unpublish_url': reverse(self.unpublish_reverse, args=(obj.pk, )),
})
if django.VERSION >= (1, 10):
return t.render(c.flatten())
else:
return t.render(c)
publisher_publish.short_description = 'Published'
publisher_publish.allow_tags = True
def get_queryset(self, request):
# hack! We need request.user to check user publish perms
self.request = request
qs = self.model.publisher_manager.drafts()
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
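    # Older Django admin releases (pre-1.6) look up queryset() rather than
    # get_queryset(), so keep the old name as an alias.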
queryset = get_queryset
def get_urls(self):
urls = super(PublisherAdmin, self).get_urls()
publish_name = '%spublish' % (self.url_name_prefix, )
unpublish_name = '%sunpublish' % (self.url_name_prefix, )
revert_name = '%srevert' % (self.url_name_prefix, )
publish_urls = [
url(r'^(?P<object_id>\d+)/publish/$', self.publish_view, name=publish_name),
url(r'^(?P<object_id>\d+)/unpublish/$', self.unpublish_view, name=unpublish_name),
url(r'^(?P<object_id>\d+)/revert/$', self.revert_view, name=revert_name),
]
return publish_urls + urls
def get_model_object(self, request, object_id):
        # filter().first() yields None for a missing object; objects.get()
        # would raise DoesNotExist before the Http404 branch could run.
        obj = self.model.objects.filter(pk=object_id).first()
        if obj is None:
            raise Http404(_('%s object with primary key %s does not exist.') % (
                force_text(self.model._meta.verbose_name),
                escape(object_id)
            ))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        if not self.has_change_permission(request) and not self.has_add_permission(request):
            raise PermissionDenied
return obj
def revert_view(self, request, object_id):
obj = self.get_model_object(request, object_id)
if not self.has_publish_permission(request, obj):
raise PermissionDenied
obj.revert_to_public()
if not request.is_ajax():
            messages.success(request, _('Draft has been reverted to the public version.'))
return HttpResponseRedirect(reverse(self.changelist_reverse))
return http_json_response({'success': True})
def unpublish_view(self, request, object_id):
obj = self.get_model_object(request, object_id)
if not self.has_publish_permission(request, obj):
raise PermissionDenied
obj.unpublish()
if not request.is_ajax():
messages.success(request, _('Published version has been deleted.'))
return HttpResponseRedirect(reverse(self.changelist_reverse))
return http_json_response({'success': True})
def publish_view(self, request, object_id):
obj = self.get_model_object(request, object_id)
if not self.has_publish_permission(request, obj):
raise PermissionDenied
obj.publish()
if not request.is_ajax():
messages.success(request, _('Draft version has been published.'))
return HttpResponseRedirect(reverse(self.changelist_reverse))
return http_json_response({'success': True})
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
obj = context.get('original', None)
if not obj:
return super(PublisherAdmin, self).render_change_form(
request, context, add, change, form_url, obj=None)
if not self.has_publish_permission(request, obj):
context['has_publish_permission'] = False
else:
context['has_publish_permission'] = True
publish_btn = None
if obj.is_dirty:
publish_btn = reverse(self.publish_reverse, args=(obj.pk, ))
preview_draft_btn = None
if callable(getattr(obj, 'get_absolute_url', None)):
preview_draft_btn = True
unpublish_btn = None
if obj.is_draft and obj.publisher_linked:
unpublish_btn = reverse(self.unpublish_reverse, args=(obj.pk, ))
revert_btn = None
if obj.is_dirty and obj.publisher_linked:
revert_btn = reverse(self.revert_reverse, args=(obj.pk, ))
context.update({
'publish_btn_live': publish_btn,
'preview_draft_btn': preview_draft_btn,
'unpublish_btn': unpublish_btn,
'revert_btn': revert_btn,
})
return super(PublisherAdmin, self).render_change_form(
request, context, add, change, form_url, obj=None)
try:
from hvad.admin import TranslatableAdmin
from hvad.manager import FALLBACK_LANGUAGES
except ImportError:
pass
else:
class PublisherHvadAdmin(TranslatableAdmin, PublisherAdmin):
change_form_template = 'publisher/hvad/change_form.html'
def queryset(self, request):
# hack! We need request.user to check user publish perms
self.request = request
language = self._language(request)
languages = [language]
for lang in FALLBACK_LANGUAGES:
if lang not in languages:
languages.append(lang)
qs = self.model._default_manager.untranslated().use_fallbacks(*languages)
qs = qs.filter(publisher_is_draft=True)
ordering = getattr(self, 'ordering', None) or ()
if ordering:
qs = qs.order_by(*ordering)
return qs
try:
from parler.admin import TranslatableAdmin as PTranslatableAdmin
except ImportError:
pass
else:
class PublisherParlerAdmin(PTranslatableAdmin, PublisherAdmin):
change_form_template = 'publisher/parler/change_form.html'
def queryset(self, request):
# hack! We need request.user to check user publish perms
self.request = request
qs = self.model.objects
qs_language = self.get_queryset_language(request)
if qs_language:
qs = qs.language(qs_language)
qs = qs.filter(publisher_is_draft=True)
ordering = getattr(self, 'ordering', None) or ()
if ordering:
qs = qs.order_by(*ordering)
return qs
class PublisherPublishedFilter(SimpleListFilter):
title = _('Published')
parameter_name = 'published'
def lookups(self, request, model_admin):
return (
('1', _('Yes')),
('0', _('No'))
)
def queryset(self, request, queryset):
try:
value = int(self.value())
except TypeError:
return queryset
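        # value == 1 ("Yes") means the draft has a published counterpart, so
        # publisher_linked must not be NULL; value == 0 inverts that.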
isnull = not value
return queryset.filter(publisher_linked__isnull=isnull)
|
|
from __future__ import absolute_import
import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
            pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
            pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
            pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
        books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__gte=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.datetime(1991, 1, 1, 0, 0)",
"datetime.datetime(1995, 1, 1, 0, 0)",
"datetime.datetime(2007, 1, 1, 0, 0)",
"datetime.datetime(2008, 1, 1, 0, 0)"
]
)
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild import errors
from collections import namedtuple
from rbuild_test import rbuildhelp
from testutils import mock
class CreateProjectBranchTest(rbuildhelp.RbuildHelper):
def testCreateProjectArgParse(self):
self.getRbuildHandle()
self.checkRbuild('create project --name=title --short-name=short '
'--domain-name=project.domain --description=description',
'rbuild_plugins.createprojectbranch.CreateProjectCommand.runCommand',
[None, None, {
'name': 'title',
'short-name': 'short',
'domain-name': 'project.domain',
'description': 'description',
}, ['create', 'project']])
self.checkRbuild('create project --name=title --short-name=short '
'--domain-name=project.domain --description=description '
'--external --upstream-url other.domain --auth-type auth '
'--username user --password secret --entitlement entitle',
'rbuild_plugins.createprojectbranch.CreateProjectCommand.runCommand',
[None, None, {
'name': 'title',
'short-name': 'short',
'domain-name': 'project.domain',
'description': 'description',
'external': True,
'upstream-url': 'other.domain',
'auth-type': 'auth',
'username': 'user',
'password': 'secret',
'entitlement': 'entitle',
}, ['create', 'project']])
def testCreateProjectCmdline(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.createProject)
cmd = handle.Commands.getCommandClass('create')()
cmd.runCommand(handle, {
'name': 'project name',
'short-name': 'shortname',
'domain-name': '',
}, ['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='',
description='',
)
def testCreateExternalProjectCmdline(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.createProject)
cmd = handle.Commands.getCommandClass('create')()
# no auth
cmd.runCommand(handle, {
'name': 'project name',
'short-name': 'shortname',
'domain-name': '',
'external': True,
'label': 'repo@n:branch',
'upstream-url': 'http://foo.com',
'auth-type': 'none',
}, ['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='',
description='',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'none', None,
None, None),
)
# userpass auth
cmd.runCommand(handle, {
'name': 'project name',
'short-name': 'shortname',
'domain-name': '',
'external': True,
'label': 'repo@n:branch',
'upstream-url': 'http://foo.com',
'auth-type': 'userpass',
'username': 'user',
'password': 'secret',
}, ['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='',
description='',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'userpass', 'user',
'secret', None),
)
# entitlement auth
cmd.runCommand(handle, {
'name': 'project name',
'short-name': 'shortname',
'domain-name': '',
'external': True,
'label': 'repo@n:branch',
'upstream-url': 'http://foo.com',
'auth-type': 'entitlement',
'entitlement': 'entitle',
}, ['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='',
description='',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'entitlement', None,
None, 'entitle'),
)
def testCreateProjectInteractive(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.createProject)
mock.mock(handle, 'ui')
handle.ui.getResponse._mock.appendReturn(
'project name', "Project name (required)", required=True)
handle.ui.getResponse._mock.appendReturn(
'desc', "Project description (optional)")
handle.ui.getResponse._mock.appendReturn(
'shortname', "Unique name (required)",
validationFn=handle.facade.rbuilder.isValidShortName,
required=True)
handle.ui.getResponse._mock.appendReturn(
'domain.name', "Domain name (blank for default)",
validationFn=handle.facade.rbuilder.isValidDomainName)
cmd = handle.Commands.getCommandClass('create')()
cmd.runCommand(handle, {}, ['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='domain.name',
description='desc',
)
def testCreateExternalProjectInteractive(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.createProject)
mock.mock(handle.facade.rbuilder, 'isValidUrl')
mock.mock(handle, 'ui')
handle.facade.rbuilder.isValidUrl._mock.setReturn(True, "http://foo.com")
handle.ui.getResponse._mock.appendReturn(
'project name', "Project name (required)", required=True)
handle.ui.getResponse._mock.appendReturn(
'desc', "Project description (optional)")
handle.ui.getResponse._mock.appendReturn(
'shortname', "Unique name (required)",
validationFn=handle.facade.rbuilder.isValidShortName,
required=True)
handle.ui.getResponse._mock.appendReturn(
'domain.name', "Domain name (blank for default)",
validationFn=handle.facade.rbuilder.isValidDomainName)
handle.ui.getResponse._mock.appendReturn(
'repo@n:branch', 'Upstream label (required)', required=True,
validationFn=handle.facade.conary.isValidLabel)
handle.ui.getResponse._mock.appendReturn(
'http://foo.com', "URL of upstream repository (optional)")
handle.ui.getResponse._mock.appendReturn(
'user', "External username", required=True)
handle.ui.getPassword._mock.appendReturn(
'secret', "External password", verify=True)
handle.ui.getResponse._mock.appendReturn(
'entitle', 'External entitlement', required=True)
cmd = handle.Commands.getCommandClass('create')()
# auth-type none
handle.ui.getChoice._mock.setReturn(
0, "External authentication type",
['None', 'Username and Password', 'Entitlement key'], default=0)
cmd.runCommand(handle, {'external': True},
['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='domain.name',
description='desc',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'none', None, None,
None),
)
# auth-type userpass
handle.ui.getChoice._mock.setReturn(
1, "External authentication type",
['None', 'Username and Password', 'Entitlement key'], default=0)
cmd.runCommand(handle, {'external': True},
['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='domain.name',
description='desc',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'userpass', 'user',
'secret', None),
)
# auth-type entitlement
handle.ui.getChoice._mock.setReturn(
2, "External authentication type",
['None', 'Username and Password', 'Entitlement key'], default=0)
cmd.runCommand(handle, {'external': True},
['rbuild', 'create', 'project'])
handle.facade.rbuilder.createProject._mock.assertCalled(
title='project name',
shortName='shortname',
domainName='domain.name',
description='desc',
external=True,
external_params=(
['repo@n:branch'], 'http://foo.com', 'entitlement', None,
None, 'entitle'),
)
def testCreateBranchArgParse(self):
self.getRbuildHandle()
self.checkRbuild('create branch --project=proj --branch=branch',
'rbuild_plugins.createprojectbranch.CreateBranchCommand.runCommand',
[None, None, {
'project': 'proj',
'branch': 'branch',
}, ['create', 'branch']])
def testCreateBranchCmdline(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.listPlatforms)
mock.mockMethod(handle.facade.rbuilder.createBranch)
mock.mock(handle, 'ui')
Platform = namedtuple('Platform', 'platformName label id')
handle.facade.rbuilder.listPlatforms._mock.setReturn([
Platform('the platform', 'the@platform', 'http://the/platform'),
Platform('not platform', 'not@platform', 'http://not/platform'),
])
cmd = handle.Commands.getCommandClass('create')()
cmd.runCommand(handle, {
'project': 'proj',
'branch': 'branch',
'platform': 'the platform',
}, ['rbuild', 'create', 'branch'])
handle.facade.rbuilder.createBranch._mock.assertCalled(
project='proj',
name='branch',
platformLabel='the@platform',
namespace=None,
description='',
)
err = self.assertRaises(errors.PluginError,
cmd.runCommand, handle, {
'project': 'proj',
'branch': 'branch',
'platform': 'missing platform',
}, ['rbuild', 'create', 'branch'])
self.assertEquals(str(err),
"No platform matching term 'missing platform' was found")
def testCreateBranchInteractive(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Create.registerCommands()
handle.CreateProjectBranch.registerCommands()
handle.CreateProjectBranch.initialize()
mock.mockMethod(handle.facade.rbuilder.listPlatforms)
mock.mockMethod(handle.facade.rbuilder.createBranch)
mock.mock(handle, 'ui')
Platform = namedtuple('Platform', 'platformName label id')
rb = handle.facade.rbuilder
rb.listPlatforms._mock.setReturn([
Platform('the platform', 'the@platform', 'http://the/platform'),
Platform('not platform', 'not@platform', 'http://not/platform'),
])
handle.ui.getResponse._mock.appendReturn('proj',
"Project name (required)", validationFn=rb.isValidShortName,
required=True)
handle.ui.getResponse._mock.appendReturn('branch',
"Branch name (required)", validationFn=rb.isValidBranchName,
required=True)
handle.ui.getResponse._mock.appendReturn('desc',
"Branch description (optional)")
handle.ui.getResponse._mock.appendReturn('nsp',
"Namespace (blank for default)")
choiceArgs = ("Platform", [
'the platform - the@platform',
'not platform - not@platform'],
"The following platforms are available:")
handle.ui.getChoice._mock.setReturn(0, *choiceArgs)
cmd = handle.Commands.getCommandClass('create')()
cmd.runCommand(handle, {}, ['rbuild', 'create', 'branch'])
rb.createBranch._mock.assertCalled(
project='proj',
name='branch',
platformLabel='the@platform',
namespace='nsp',
description='desc',
)
|
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Tests for the jinja filters.

    :copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
env = Environment()
class FilterTestCase(JinjaTestCase):
def test_capitalize(self):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == 'Foo bar'
def test_center(self):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == ' foo '
def test_default(self):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given='yes') == 'no|False|no|yes'
def test_dictsort(self):
tmpl = env.from_string(
'{{ foo|dictsort }}|'
'{{ foo|dictsort(true) }}|'
'{{ foo|dictsort(false, "value") }}'
)
out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
"[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
"[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")
def test_batch(self):
tmpl = env.from_string("{{ foo|batch(3)|list }}|"
"{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=range(10))
assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")
def test_slice(self):
tmpl = env.from_string('{{ foo|slice(3)|list }}|'
'{{ foo|slice(3, "X")|list }}')
out = tmpl.render(foo=range(10))
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
def test_escape(self):
tmpl = env.from_string('''{{ '<">&'|escape }}''')
out = tmpl.render()
        assert out == '&lt;&#34;&gt;&amp;'
def test_striptags(self):
tmpl = env.from_string('''{{ foo|striptags }}''')
out = tmpl.render(foo=' <p>just a small \n <a href="#">'
'example</a> link</p>\n<p>to a webpage</p> '
'<!-- <p>and some commented stuff</p> -->')
assert out == 'just a small example link to a webpage'
def test_filesizeformat(self):
tmpl = env.from_string(
'{{ 100|filesizeformat }}|'
'{{ 1000|filesizeformat }}|'
'{{ 1000000|filesizeformat }}|'
'{{ 1000000000|filesizeformat }}|'
'{{ 1000000000000|filesizeformat }}|'
'{{ 100|filesizeformat(true) }}|'
'{{ 1000|filesizeformat(true) }}|'
'{{ 1000000|filesizeformat(true) }}|'
'{{ 1000000000|filesizeformat(true) }}|'
'{{ 1000000000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
'1000 Bytes|1.0 MiB|0.9 GiB|0.9 TiB'
))
def test_first(self):
tmpl = env.from_string('{{ foo|first }}')
out = tmpl.render(foo=range(10))
assert out == '0'
def test_float(self):
tmpl = env.from_string('{{ "42"|float }}|'
'{{ "ajsghasjgd"|float }}|'
'{{ "32.32"|float }}')
out = tmpl.render()
assert out == '42.0|0.0|32.32'
def test_format(self):
tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
out = tmpl.render()
assert out == 'a|b'
def test_indent(self):
tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
out = tmpl.render(foo=text)
assert out == ('foo bar foo bar\n foo bar foo bar| '
'foo bar foo bar\n foo bar foo bar')
def test_int(self):
tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
'{{ "32.32"|int }}')
out = tmpl.render()
assert out == '42|0|32'
def test_join(self):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == '1|2|3'
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
        assert tmpl.render() == '&lt;foo&gt;<span>foo</span>'
def test_join_attribute(self):
class User(object):
def __init__(self, username):
self.username = username
tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'
def test_last(self):
tmpl = env.from_string('''{{ foo|last }}''')
out = tmpl.render(foo=range(10))
assert out == '9'
def test_length(self):
tmpl = env.from_string('''{{ "hello world"|length }}''')
out = tmpl.render()
assert out == '11'
def test_lower(self):
tmpl = env.from_string('''{{ "FOO"|lower }}''')
out = tmpl.render()
assert out == 'foo'
def test_pprint(self):
from pprint import pformat
tmpl = env.from_string('''{{ data|pprint }}''')
data = range(1000)
assert tmpl.render(data=data) == pformat(data)
def test_random(self):
tmpl = env.from_string('''{{ seq|random }}''')
seq = range(100)
for _ in range(10):
assert int(tmpl.render(seq=seq)) in seq
def test_reverse(self):
tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
'{{ [1, 2, 3]|reverse|list }}')
assert tmpl.render() == 'raboof|[3, 2, 1]'
def test_string(self):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string('''{{ obj|string }}''')
assert tmpl.render(obj=x) == unicode(x)
def test_title(self):
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
def test_truncate(self):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
'{{ smalldata|truncate(15) }}'
)
out = tmpl.render(data='foobar baz bar' * 1000,
smalldata='foobar baz bar')
assert out == 'foobar baz barf>>>|foobar baz >>>|foobar baz bar'
def test_upper(self):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == 'FOO'
def test_urlize(self):
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
'http://www.example.com/</a> bar'
def test_wordcount(self):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == '3'
def test_block(self):
tmpl = env.from_string('{% filter lower|escape %}<HEHE>{% endfilter %}')
        assert tmpl.render() == '&lt;hehe&gt;'
def test_chaining(self):
tmpl = env.from_string('''{{ ['<foo>', '<bar>']|first|upper|escape }}''')
        assert tmpl.render() == '&lt;FOO&gt;'
def test_sum(self):
tmpl = env.from_string('''{{ [1, 2, 3, 4, 5, 6]|sum }}''')
assert tmpl.render() == '21'
def test_sum_attributes(self):
tmpl = env.from_string('''{{ values|sum('value') }}''')
assert tmpl.render(values=[
{'value': 23},
{'value': 1},
{'value': 18},
]) == '42'
def test_sum_attributes_nested(self):
tmpl = env.from_string('''{{ values|sum('real.value') }}''')
assert tmpl.render(values=[
{'real': {'value': 23}},
{'real': {'value': 1}},
{'real': {'value': 18}},
]) == '42'
def test_abs(self):
tmpl = env.from_string('''{{ -1|abs }}|{{ 1|abs }}''')
assert tmpl.render() == '1|1', tmpl.render()
def test_round_positive(self):
tmpl = env.from_string('{{ 2.7|round }}|{{ 2.1|round }}|'
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}")
assert tmpl.render() == '3.0|2.0|2.123|3.0', tmpl.render()
def test_round_negative(self):
tmpl = env.from_string('{{ 21.3|round(-1)}}|'
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}")
assert tmpl.render() == '20.0|30.0|20.0',tmpl.render()
def test_xmlattr(self):
tmpl = env.from_string("{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}")
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
        assert 'blub:blub="&lt;?&gt;"' in out
def test_sort1(self):
tmpl = env.from_string('{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}')
assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'
def test_sort2(self):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == 'AbcD'
def test_sort3(self):
tmpl = env.from_string('''{{ ['foo', 'Bar', 'blah']|sort }}''')
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self):
class Magic(object):
def __init__(self, value):
self.value = value
def __unicode__(self):
return unicode(self.value)
tmpl = env.from_string('''{{ items|sort(attribute='value')|join }}''')
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == '1234'
def test_groupby(self):
tmpl = env.from_string('''
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render().split('|') == [
"1: 1, 2: 1, 1",
"2: 2, 3",
"3: 3, 4",
""
]
def test_groupby_tuple_index(self):
tmpl = env.from_string('''
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render() == 'a:1:2|b:1|'
def test_groupby_multidot(self):
class Date(object):
def __init__(self, day, month, year):
self.day = day
self.month = month
self.year = year
class Article(object):
def __init__(self, title, *date):
self.date = Date(*date)
self.title = title
articles = [
Article('aha', 1, 1, 1970),
Article('interesting', 2, 1, 1970),
Article('really?', 3, 1, 1970),
Article('totally not', 1, 1, 1971)
]
tmpl = env.from_string('''
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}''')
assert tmpl.render(articles=articles).split('|') == [
'1970[aha][interesting][really?]',
'1971[totally not]',
''
]
def test_filtertag(self):
tmpl = env.from_string("{% filter upper|replace('FOO', 'foo') %}"
"foobar{% endfilter %}")
assert tmpl.render() == 'fooBAR'
def test_replace(self):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string='<foo>') == '<f4242>'
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
        assert tmpl.render(string='<foo>') == '&lt;f4242&gt;'
        tmpl = env.from_string('{{ string|replace("<", 42) }}')
        assert tmpl.render(string='<foo>') == '42foo&gt;'
        tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
        assert tmpl.render(string=Markup('foo')) == 'f&gt;x&lt;&gt;x&lt;'
def test_forceescape(self):
tmpl = env.from_string('{{ x|forceescape }}')
        assert tmpl.render(x=Markup('<div />')) == u'&lt;div /&gt;'
def test_safe(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == '<div>foo</div>'
tmpl = env.from_string('{{ "<div>foo</div>" }}')
        assert tmpl.render() == '&lt;div&gt;foo&lt;/div&gt;'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FilterTestCase))
return suite
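# Minimal standalone runner sketch (an addition, not part of the original
# module); the suite is normally collected by the jinja2.testsuite package.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())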
|
|
import os
import boto3
import botocore
from botocore.exceptions import ClientError
import pytest
import sure # noqa # pylint: disable=unused-import
from moto import mock_elbv2, mock_ec2, mock_acm
from moto.elbv2 import elbv2_backends
from moto.core import ACCOUNT_ID
from tests import EXAMPLE_AMI_ID
@mock_elbv2
@mock_ec2
def test_create_load_balancer():
response, _, security_group, subnet1, subnet2, conn = create_load_balancer()
lb = response.get("LoadBalancers")[0]
lb.get("DNSName").should.equal("my-lb-1.us-east-1.elb.amazonaws.com")
lb.get("LoadBalancerArn").should.equal(
f"arn:aws:elasticloadbalancing:us-east-1:{ACCOUNT_ID}:loadbalancer/my-lb/50dc6c495c0c9188"
)
lb.get("SecurityGroups").should.equal([security_group.id])
lb.get("AvailabilityZones").should.equal(
[
{"SubnetId": subnet1.id, "ZoneName": "us-east-1a"},
{"SubnetId": subnet2.id, "ZoneName": "us-east-1b"},
]
)
lb.get("CreatedTime").tzinfo.should_not.be.none
lb.get("State").get("Code").should.equal("provisioning")
# Ensure the tags persisted
response = conn.describe_tags(ResourceArns=[lb.get("LoadBalancerArn")])
tags = {d["Key"]: d["Value"] for d in response["TagDescriptions"][0]["Tags"]}
tags.should.equal({"key_name": "a_value"})
def create_load_balancer():
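    """Shared helper for these tests.

    Builds a VPC with two subnets and a security group, creates an internal
    load balancer named "my-lb" tagged with key_name=a_value, and returns the
    tuple (create_load_balancer response, vpc, security_group, subnet1,
    subnet2, elbv2 client) that the tests unpack.
    """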
conn = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
response = conn.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
return response, vpc, security_group, subnet1, subnet2, conn
@mock_elbv2
@mock_ec2
def test_describe_load_balancers():
response, _, _, _, _, conn = create_load_balancer()
response = conn.describe_load_balancers()
response.get("LoadBalancers").should.have.length_of(1)
lb = response.get("LoadBalancers")[0]
lb.get("LoadBalancerName").should.equal("my-lb")
lb.get("State").get("Code").should.equal("active")
response = conn.describe_load_balancers(
LoadBalancerArns=[lb.get("LoadBalancerArn")]
)
response.get("LoadBalancers")[0].get("LoadBalancerName").should.equal("my-lb")
response = conn.describe_load_balancers(Names=["my-lb"])
response.get("LoadBalancers")[0].get("LoadBalancerName").should.equal("my-lb")
with pytest.raises(ClientError):
conn.describe_load_balancers(LoadBalancerArns=["not-a/real/arn"])
with pytest.raises(ClientError):
conn.describe_load_balancers(Names=["nope"])
@mock_elbv2
@mock_ec2
def test_add_remove_tags():
_, _, _, _, _, conn = create_load_balancer()
lbs = conn.describe_load_balancers()["LoadBalancers"]
lbs.should.have.length_of(1)
lb = lbs[0]
with pytest.raises(ClientError):
conn.add_tags(ResourceArns=["missing-arn"], Tags=[{"Key": "a", "Value": "b"}])
conn.add_tags(
ResourceArns=[lb.get("LoadBalancerArn")], Tags=[{"Key": "a", "Value": "b"}]
)
tags = {
d["Key"]: d["Value"]
for d in conn.describe_tags(ResourceArns=[lb.get("LoadBalancerArn")])[
"TagDescriptions"
][0]["Tags"]
}
tags.should.have.key("a").which.should.equal("b")
conn.add_tags(
ResourceArns=[lb.get("LoadBalancerArn")],
Tags=[
{"Key": "a", "Value": "b"},
{"Key": "b", "Value": "b"},
{"Key": "c", "Value": "b"},
{"Key": "d", "Value": "b"},
{"Key": "e", "Value": "b"},
{"Key": "f", "Value": "b"},
{"Key": "g", "Value": "b"},
{"Key": "h", "Value": "b"},
{"Key": "j", "Value": "b"},
],
)
conn.add_tags.when.called_with(
ResourceArns=[lb.get("LoadBalancerArn")], Tags=[{"Key": "k", "Value": "b"}]
).should.throw(botocore.exceptions.ClientError)
conn.add_tags(
ResourceArns=[lb.get("LoadBalancerArn")], Tags=[{"Key": "j", "Value": "c"}]
)
tags = {
d["Key"]: d["Value"]
for d in conn.describe_tags(ResourceArns=[lb.get("LoadBalancerArn")])[
"TagDescriptions"
][0]["Tags"]
}
tags.should.have.key("a").which.should.equal("b")
tags.should.have.key("b").which.should.equal("b")
tags.should.have.key("c").which.should.equal("b")
tags.should.have.key("d").which.should.equal("b")
tags.should.have.key("e").which.should.equal("b")
tags.should.have.key("f").which.should.equal("b")
tags.should.have.key("g").which.should.equal("b")
tags.should.have.key("h").which.should.equal("b")
tags.should.have.key("j").which.should.equal("c")
tags.shouldnt.have.key("k")
conn.remove_tags(ResourceArns=[lb.get("LoadBalancerArn")], TagKeys=["a"])
tags = {
d["Key"]: d["Value"]
for d in conn.describe_tags(ResourceArns=[lb.get("LoadBalancerArn")])[
"TagDescriptions"
][0]["Tags"]
}
tags.shouldnt.have.key("a")
tags.should.have.key("b").which.should.equal("b")
tags.should.have.key("c").which.should.equal("b")
tags.should.have.key("d").which.should.equal("b")
tags.should.have.key("e").which.should.equal("b")
tags.should.have.key("f").which.should.equal("b")
tags.should.have.key("g").which.should.equal("b")
tags.should.have.key("h").which.should.equal("b")
tags.should.have.key("j").which.should.equal("c")
@mock_elbv2
@mock_ec2
def test_create_elb_in_multiple_region():
for region in ["us-west-1", "us-west-2"]:
conn = boto3.client("elbv2", region_name=region)
ec2 = boto3.resource("ec2", region_name=region)
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone=region + "a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone=region + "b"
)
conn.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
list(
boto3.client("elbv2", region_name="us-west-1")
.describe_load_balancers()
.get("LoadBalancers")
).should.have.length_of(1)
list(
boto3.client("elbv2", region_name="us-west-2")
.describe_load_balancers()
.get("LoadBalancers")
).should.have.length_of(1)
@mock_elbv2
@mock_ec2
def test_create_listeners_without_port():
response, vpc, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = conn.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPort="8080",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
target_group_arn = target_group["TargetGroupArn"]
response = conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
listener = response.get("Listeners")[0]
listener.get("Port").should.equal(None)
listener.get("Protocol").should.equal("HTTP")
listener.get("DefaultActions").should.equal(
[{"TargetGroupArn": target_group_arn, "Type": "forward"}]
)
@mock_ec2
@mock_elbv2
def test_create_rule_forward_config_as_second_arg():
# https://github.com/spulec/moto/issues/4123
    # Regression test: the argument parsing used to be convoluted, and actions
    # with type=forward had to be the first action specified
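    # The rule below therefore lists an authenticate-cognito action first
    # (Order=1) and the forward action second (Order=2) to exercise that case.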
response, vpc, _, _, _, elbv2 = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = elbv2.create_listener(
LoadBalancerArn=load_balancer_arn, Protocol="HTTP", Port=80, DefaultActions=[],
)
http_listener_arn = response.get("Listeners")[0]["ListenerArn"]
priority = 100
response = elbv2.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPort="8080",
HealthCheckPath="/",
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
# No targets registered yet
target_group_arn = target_group.get("TargetGroupArn")
elbv2.create_rule(
ListenerArn=http_listener_arn,
Conditions=[
{"Field": "path-pattern", "PathPatternConfig": {"Values": [f"/sth*",]},},
],
Priority=priority,
Actions=[
{
"Type": "authenticate-cognito",
"Order": 1,
"AuthenticateCognitoConfig": {
"UserPoolArn": "?1",
"UserPoolClientId": "?2",
"UserPoolDomain": "?2",
"SessionCookieName": "AWSELBAuthSessionCookie",
"Scope": "openid",
"SessionTimeout": 604800,
"OnUnauthenticatedRequest": "authenticate",
},
},
{
"Type": "forward",
"Order": 2,
"ForwardConfig": {
"TargetGroups": [
{"TargetGroupArn": target_group_arn, "Weight": 1},
],
"TargetGroupStickinessConfig": {"Enabled": False,},
},
},
],
)
all_rules = elbv2.describe_rules(ListenerArn=http_listener_arn)["Rules"]
our_rule = all_rules[0]
actions = our_rule["Actions"]
forward_action = [a for a in actions if "ForwardConfig" in a.keys()][0]
forward_action.should.equal(
{
"ForwardConfig": {
"TargetGroups": [{"TargetGroupArn": target_group_arn, "Weight": 1}]
},
"Type": "forward",
}
)
@mock_elbv2
@mock_ec2
def test_describe_paginated_balancers():
conn = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
for i in range(51):
conn.create_load_balancer(
Name="my-lb%d" % i,
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
resp = conn.describe_load_balancers()
resp["LoadBalancers"].should.have.length_of(50)
resp["NextMarker"].should.equal(resp["LoadBalancers"][-1]["LoadBalancerName"])
resp2 = conn.describe_load_balancers(Marker=resp["NextMarker"])
resp2["LoadBalancers"].should.have.length_of(1)
assert "NextToken" not in resp2.keys()
@mock_elbv2
@mock_ec2
def test_delete_load_balancer():
response, _, _, _, _, conn = create_load_balancer()
response.get("LoadBalancers").should.have.length_of(1)
lb = response.get("LoadBalancers")[0]
conn.delete_load_balancer(LoadBalancerArn=lb.get("LoadBalancerArn"))
balancers = conn.describe_load_balancers().get("LoadBalancers")
balancers.should.have.length_of(0)
@mock_ec2
@mock_elbv2
def test_register_targets():
conn = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
conn.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
response = conn.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPort="8080",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
# No targets registered yet
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(0)
response = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
instance_id1 = response[0].id
instance_id2 = response[1].id
response = conn.register_targets(
TargetGroupArn=target_group.get("TargetGroupArn"),
Targets=[
{"Id": instance_id1, "Port": 5060},
{"Id": instance_id2, "Port": 4030},
],
)
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(2)
response = conn.deregister_targets(
TargetGroupArn=target_group.get("TargetGroupArn"),
Targets=[{"Id": instance_id2}],
)
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(1)
@mock_ec2
@mock_elbv2
def test_stopped_instance_target():
target_group_port = 8080
conn = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
conn.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
response = conn.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=target_group_port,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
# No targets registered yet
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(0)
response = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
instance = response[0]
target_dict = {"Id": instance.id, "Port": 500}
response = conn.register_targets(
TargetGroupArn=target_group.get("TargetGroupArn"), Targets=[target_dict]
)
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(1)
target_health_description = response.get("TargetHealthDescriptions")[0]
target_health_description["Target"].should.equal(target_dict)
target_health_description["HealthCheckPort"].should.equal(str(target_group_port))
target_health_description["TargetHealth"].should.equal({"State": "healthy"})
instance.stop()
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(1)
target_health_description = response.get("TargetHealthDescriptions")[0]
target_health_description["Target"].should.equal(target_dict)
target_health_description["HealthCheckPort"].should.equal(str(target_group_port))
target_health_description["TargetHealth"].should.equal(
{
"State": "unused",
"Reason": "Target.InvalidState",
"Description": "Target is in the stopped state",
}
)
@mock_ec2
@mock_elbv2
def test_terminated_instance_target():
target_group_port = 8080
conn = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
conn.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
response = conn.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=target_group_port,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
# No targets registered yet
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(0)
response = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
instance = response[0]
target_dict = {"Id": instance.id, "Port": 500}
response = conn.register_targets(
TargetGroupArn=target_group.get("TargetGroupArn"), Targets=[target_dict]
)
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(1)
target_health_description = response.get("TargetHealthDescriptions")[0]
target_health_description["Target"].should.equal(target_dict)
target_health_description["HealthCheckPort"].should.equal(str(target_group_port))
target_health_description["TargetHealth"].should.equal({"State": "healthy"})
instance.terminate()
response = conn.describe_target_health(
TargetGroupArn=target_group.get("TargetGroupArn")
)
response.get("TargetHealthDescriptions").should.have.length_of(0)
@mock_elbv2
@mock_ec2
def test_create_rule_priority_in_use():
response, _, _, _, _, elbv2 = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = elbv2.create_listener(
LoadBalancerArn=load_balancer_arn, Protocol="HTTP", Port=80, DefaultActions=[],
)
http_listener_arn = response.get("Listeners")[0]["ListenerArn"]
priority = 100
elbv2.create_rule(
ListenerArn=http_listener_arn, Priority=priority, Conditions=[], Actions=[],
)
# test for PriorityInUse
with pytest.raises(ClientError) as ex:
elbv2.create_rule(
ListenerArn=http_listener_arn, Priority=priority, Conditions=[], Actions=[],
)
err = ex.value.response["Error"]
err["Code"].should.equal("PriorityInUse")
err["Message"].should.equal("The specified priority is in use.")
@mock_elbv2
@mock_ec2
def test_modify_rule_conditions():
response, _, _, _, _, elbv2 = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
action = {
"Type": "redirect",
"RedirectConfig": {
"Protocol": "HTTPS",
"Port": "443",
"StatusCode": "HTTP_301",
},
}
condition = {
"Field": "path-pattern",
"PathPatternConfig": {"Values": [f"/sth*",]},
}
response = elbv2.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[action],
)
http_listener_arn = response.get("Listeners")[0]["ListenerArn"]
response = elbv2.create_rule(
ListenerArn=http_listener_arn, Priority=100, Conditions=[], Actions=[],
)
rule = response["Rules"][0]
assert len(rule["Actions"]) == 0
assert len(rule["Conditions"]) == 0
response = elbv2.modify_rule(RuleArn=rule["RuleArn"], Actions=[action],)
rule = response["Rules"][0]
assert len(rule["Actions"]) == 1
assert len(rule["Conditions"]) == 0
response = elbv2.modify_rule(RuleArn=rule["RuleArn"], Conditions=[condition])
rule = response["Rules"][0]
assert len(rule["Actions"]) == 1
assert len(rule["Conditions"]) == 1
response = elbv2.modify_rule(
RuleArn=rule["RuleArn"],
Conditions=[condition, condition],
Actions=[action, action],
)
rule = response["Rules"][0]
assert len(rule["Actions"]) == 2
assert len(rule["Conditions"]) == 2
@mock_elbv2
@mock_ec2
def test_handle_listener_rules():
response, vpc, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = conn.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPort="8080",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
# Plain HTTP listener
response = conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[
{"Type": "forward", "TargetGroupArn": target_group.get("TargetGroupArn")}
],
)
listener = response.get("Listeners")[0]
listener.get("Port").should.equal(80)
listener.get("Protocol").should.equal("HTTP")
listener.get("DefaultActions").should.equal(
[{"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"}]
)
http_listener_arn = listener.get("ListenerArn")
# create first rule
priority = 100
host = "xxx.example.com"
path_pattern = "foobar"
pathpatternconfig_pattern = "foobar2"
created_rule = conn.create_rule(
ListenerArn=http_listener_arn,
Priority=priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [pathpatternconfig_pattern]},
},
],
Actions=[
{"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"}
],
)
rule = created_rule.get("Rules")[0]
rule["Priority"].should.equal("100")
    # check that rules are sorted by priority
priority = 500
host = "yyy.example.com"
path_pattern = "foobar"
rules = conn.create_rule(
ListenerArn=http_listener_arn,
Priority=priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [pathpatternconfig_pattern]},
},
],
Actions=[
{"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"}
],
)
# add rule that uses forward_config
priority = 550
host = "aaa.example.com"
path_pattern = "barfoo"
rules = conn.create_rule(
ListenerArn=http_listener_arn,
Priority=priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [pathpatternconfig_pattern]},
},
],
Actions=[
{
"Type": "forward",
"ForwardConfig": {
"TargetGroups": [
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Weight": 1,
},
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Weight": 2,
},
]
},
},
],
)
# test for PriorityInUse
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [pathpatternconfig_pattern]},
},
],
Actions=[
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Type": "forward",
}
],
)
# test for describe listeners
obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
obtained_rules["Rules"].should.have.length_of(4)
priorities = [rule["Priority"] for rule in obtained_rules["Rules"]]
priorities.should.equal(["100", "500", "550", "default"])
first_rule = obtained_rules["Rules"][0]
second_rule = obtained_rules["Rules"][1]
third_rule = obtained_rules["Rules"][2]
default_rule = obtained_rules["Rules"][3]
first_rule["IsDefault"].should.equal(False)
default_rule["IsDefault"].should.equal(True)
obtained_rules = conn.describe_rules(RuleArns=[first_rule["RuleArn"]])
obtained_rules["Rules"].should.equal([first_rule])
# test for pagination
obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1)
len(obtained_rules["Rules"]).should.equal(1)
obtained_rules.should.have.key("NextMarker")
next_marker = obtained_rules["NextMarker"]
following_rules = conn.describe_rules(
ListenerArn=http_listener_arn, PageSize=1, Marker=next_marker
)
len(following_rules["Rules"]).should.equal(1)
following_rules["Rules"][0]["RuleArn"].should_not.equal(
obtained_rules["Rules"][0]["RuleArn"]
)
# test for invalid describe rule request
with pytest.raises(ClientError):
conn.describe_rules()
with pytest.raises(ClientError):
conn.describe_rules(RuleArns=[])
with pytest.raises(ClientError):
conn.describe_rules(
ListenerArn=http_listener_arn, RuleArns=[first_rule["RuleArn"]]
)
# modify rule partially
new_host = "new.example.com"
new_path_pattern = "new_path"
new_pathpatternconfig_pattern = "new_path2"
conn.modify_rule(
RuleArn=first_rule["RuleArn"],
Conditions=[
{"Field": "host-header", "Values": [new_host]},
{"Field": "path-pattern", "Values": [new_path_pattern]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [new_pathpatternconfig_pattern]},
},
],
)
rules = conn.describe_rules(ListenerArn=http_listener_arn)
obtained_rule = rules["Rules"][0]
obtained_rule["Conditions"][0]["Values"][0].should.equal(new_host)
obtained_rule["Conditions"][1]["Values"][0].should.equal(new_path_pattern)
obtained_rule["Conditions"][2]["PathPatternConfig"]["Values"][0].should.equal(
new_pathpatternconfig_pattern
)
obtained_rule["Actions"][0]["TargetGroupArn"].should.equal(
target_group.get("TargetGroupArn")
)
# modify priority
conn.set_rule_priorities(
RulePriorities=[
{
"RuleArn": first_rule["RuleArn"],
"Priority": int(first_rule["Priority"]) - 1,
}
]
)
    # partially modify the forward_config rule
new_host_2 = "new.examplewebsite.com"
new_path_pattern_2 = "new_path_2"
new_pathpatternconfig_pattern_2 = "new_path_2"
conn.modify_rule(
RuleArn=third_rule["RuleArn"],
Conditions=[
{"Field": "host-header", "Values": [new_host_2]},
{"Field": "path-pattern", "Values": [new_path_pattern_2]},
{
"Field": "path-pattern",
"PathPatternConfig": {"Values": [new_pathpatternconfig_pattern_2]},
},
],
Actions=[
{"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward",}
],
)
rules = conn.describe_rules(ListenerArn=http_listener_arn)
obtained_rule = rules["Rules"][2]
obtained_rule["Conditions"][0]["Values"][0].should.equal(new_host_2)
obtained_rule["Conditions"][1]["Values"][0].should.equal(new_path_pattern_2)
obtained_rule["Conditions"][2]["PathPatternConfig"]["Values"][0].should.equal(
new_pathpatternconfig_pattern_2
)
obtained_rule["Actions"][0]["TargetGroupArn"].should.equal(
target_group.get("TargetGroupArn")
)
# modify priority
conn.set_rule_priorities(
RulePriorities=[
{
"RuleArn": third_rule["RuleArn"],
"Priority": int(third_rule["Priority"]) - 1,
}
]
)
with pytest.raises(ClientError):
conn.set_rule_priorities(
RulePriorities=[
{"RuleArn": first_rule["RuleArn"], "Priority": 999},
{"RuleArn": second_rule["RuleArn"], "Priority": 999},
{"RuleArn": third_rule["RuleArn"], "Priority": 999},
]
)
# delete
arn = first_rule["RuleArn"]
conn.delete_rule(RuleArn=arn)
rules = conn.describe_rules(ListenerArn=http_listener_arn)["Rules"]
len(rules).should.equal(3)
# test for invalid action type
safe_priority = 2
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=safe_priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
],
Actions=[
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Type": "forward2",
}
],
)
    # test for an invalid (non-existent) target group ARN
safe_priority = 2
invalid_target_group_arn = target_group.get("TargetGroupArn") + "x"
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=safe_priority,
Conditions=[
{"Field": "host-header", "Values": [host]},
{"Field": "path-pattern", "Values": [path_pattern]},
],
Actions=[{"TargetGroupArn": invalid_target_group_arn, "Type": "forward"}],
)
# test for invalid condition field_name
safe_priority = 2
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=safe_priority,
Conditions=[{"Field": "xxxxxxx", "Values": [host]}],
Actions=[
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Type": "forward",
}
],
)
    # test for empty condition value
safe_priority = 2
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=safe_priority,
Conditions=[{"Field": "host-header", "Values": []}],
Actions=[
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Type": "forward",
}
],
)
# test for multiple condition value
safe_priority = 2
with pytest.raises(ClientError):
conn.create_rule(
ListenerArn=http_listener_arn,
Priority=safe_priority,
Conditions=[{"Field": "host-header", "Values": [host, host]}],
Actions=[
{
"TargetGroupArn": target_group.get("TargetGroupArn"),
"Type": "forward",
}
],
)
@mock_elbv2
def test_describe_account_limits():
client = boto3.client("elbv2", region_name="eu-central-1")
resp = client.describe_account_limits()
resp["Limits"][0].should.contain("Name")
resp["Limits"][0].should.contain("Max")
@mock_elbv2
def test_describe_ssl_policies():
client = boto3.client("elbv2", region_name="eu-central-1")
resp = client.describe_ssl_policies()
len(resp["SslPolicies"]).should.equal(6)
resp = client.describe_ssl_policies(
Names=["ELBSecurityPolicy-TLS-1-2-2017-01", "ELBSecurityPolicy-2016-08",]
)
len(resp["SslPolicies"]).should.equal(2)
resp = client.describe_ssl_policies(
Names=[
"ELBSecurityPolicy-TLS-1-2-2017-01",
"ELBSecurityPolicy-2016-08",
"ELBSecurityPolicy-2016-08",
]
)
len(resp["SslPolicies"]).should.equal(2)
@mock_elbv2
@mock_ec2
def test_set_ip_address_type():
response, _, security_group, subnet1, subnet2, client = create_load_balancer()
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
    # Internal LBs can't be dualstack yet
with pytest.raises(ClientError):
client.set_ip_address_type(LoadBalancerArn=arn, IpAddressType="dualstack")
    # Create an internet-facing one
response = client.create_load_balancer(
Name="my-lb2",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internet-facing",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.set_ip_address_type(LoadBalancerArn=arn, IpAddressType="dualstack")
@mock_elbv2
@mock_ec2
def test_set_security_groups():
client = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
security_group2 = ec2.create_security_group(
GroupName="b-security-group", Description="Second One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
response = client.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.set_security_groups(
LoadBalancerArn=arn, SecurityGroups=[security_group.id, security_group2.id]
)
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
len(resp["LoadBalancers"][0]["SecurityGroups"]).should.equal(2)
with pytest.raises(ClientError):
client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existent"])
@mock_elbv2
@mock_ec2
def test_set_subnets_errors():
client = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.64/26", AvailabilityZone="us-east-1b"
)
subnet3 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1c"
)
response = client.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.set_subnets(
LoadBalancerArn=arn, Subnets=[subnet1.id, subnet2.id, subnet3.id]
)
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
len(resp["LoadBalancers"][0]["AvailabilityZones"]).should.equal(3)
# Only 1 AZ
with pytest.raises(ClientError) as ex:
client.set_subnets(LoadBalancerArn=arn, Subnets=[subnet1.id])
err = ex.value.response["Error"]
err["Message"].should.equal("More than 1 availability zone must be specified")
# Multiple subnets in same AZ
with pytest.raises(ClientError) as ex:
client.set_subnets(
LoadBalancerArn=arn, Subnets=[subnet1.id, subnet2.id, subnet2.id]
)
err = ex.value.response["Error"]
err["Message"].should.equal("The specified subnet does not exist.")
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_idle_timeout():
response, _, _, _, _, client = create_load_balancer()
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.modify_load_balancer_attributes(
LoadBalancerArn=arn,
Attributes=[{"Key": "idle_timeout.timeout_seconds", "Value": "600"}],
)
    # Check it's 600, not 60
response = client.describe_load_balancer_attributes(LoadBalancerArn=arn)
idle_timeout = list(
filter(
lambda item: item["Key"] == "idle_timeout.timeout_seconds",
response["Attributes"],
)
)[0]
idle_timeout["Value"].should.equal("600")
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_routing_http2_enabled():
response, _, _, _, _, client = create_load_balancer()
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.modify_load_balancer_attributes(
LoadBalancerArn=arn,
Attributes=[{"Key": "routing.http2.enabled", "Value": "false"}],
)
response = client.describe_load_balancer_attributes(LoadBalancerArn=arn)
routing_http2_enabled = list(
filter(
lambda item: item["Key"] == "routing.http2.enabled", response["Attributes"],
)
)[0]
routing_http2_enabled["Value"].should.equal("false")
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_crosszone_enabled():
response, _, _, _, _, client = create_load_balancer()
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.modify_load_balancer_attributes(
LoadBalancerArn=arn,
Attributes=[
{"Key": "load_balancing.cross_zone.enabled", "Value": "false"},
{"Key": "deletion_protection.enabled", "Value": "false"},
],
)
attrs = client.describe_load_balancer_attributes(LoadBalancerArn=arn)["Attributes"]
attrs.should.contain({"Key": "deletion_protection.enabled", "Value": "false"})
attrs.should.contain({"Key": "load_balancing.cross_zone.enabled", "Value": "false"})
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_routing_http_drop_invalid_header_fields_enabled():
response, _, _, _, _, client = create_load_balancer()
arn = response["LoadBalancers"][0]["LoadBalancerArn"]
client.modify_load_balancer_attributes(
LoadBalancerArn=arn,
Attributes=[
{"Key": "routing.http.drop_invalid_header_fields.enabled", "Value": "false"}
],
)
response = client.describe_load_balancer_attributes(LoadBalancerArn=arn)
routing_http_drop_invalid_header_fields_enabled = list(
filter(
lambda item: item["Key"]
== "routing.http.drop_invalid_header_fields.enabled",
response["Attributes"],
)
)[0]
routing_http_drop_invalid_header_fields_enabled["Value"].should.equal("false")
@mock_elbv2
@mock_ec2
@mock_acm
def test_modify_listener_http_to_https():
client = boto3.client("elbv2", region_name="eu-central-1")
acm = boto3.client("acm", region_name="eu-central-1")
ec2 = boto3.resource("ec2", region_name="eu-central-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="eu-central-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="eu-central-1b"
)
response = client.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = client.create_target_group(
Name="a-target",
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol="HTTP",
HealthCheckPort="8080",
HealthCheckPath="/",
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={"HttpCode": "200"},
)
target_group = response.get("TargetGroups")[0]
target_group_arn = target_group["TargetGroupArn"]
# Plain HTTP listener
response = client.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
listener_arn = response["Listeners"][0]["ListenerArn"]
response = acm.request_certificate(
DomainName="google.com",
SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"],
)
response = acm.request_certificate(
DomainName="yahoo.com",
SubjectAlternativeNames=["yahoo.com", "www.yahoo.com", "mail.yahoo.com"],
)
yahoo_arn = response["CertificateArn"]
response = client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol="HTTPS",
SslPolicy="ELBSecurityPolicy-TLS-1-2-2017-01",
Certificates=[{"CertificateArn": yahoo_arn,},],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
response["Listeners"][0]["Port"].should.equal(443)
response["Listeners"][0]["Protocol"].should.equal("HTTPS")
response["Listeners"][0]["SslPolicy"].should.equal(
"ELBSecurityPolicy-TLS-1-2-2017-01"
)
len(response["Listeners"][0]["Certificates"]).should.equal(1)
# Check default cert, can't do this in server mode
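    # (under TEST_SERVER_MODE moto runs out-of-process, so the in-memory
    # elbv2_backends registry is not reachable from the test)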
if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false":
listener = (
elbv2_backends["eu-central-1"]
.load_balancers[load_balancer_arn]
.listeners[listener_arn]
)
listener.certificate.should.equal(yahoo_arn)
# No default cert
with pytest.raises(ClientError) as ex:
client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol="HTTPS",
SslPolicy="ELBSecurityPolicy-TLS-1-2-2017-01",
Certificates=[],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
err = ex.value.response["Error"]
err["Code"].should.equal("CertificateWereNotPassed")
err["Message"].should.equal(
"You must provide a list containing exactly one certificate if the listener protocol is HTTPS."
)
# Bad cert
with pytest.raises(ClientError) as exc:
client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol="HTTPS",
SslPolicy="ELBSecurityPolicy-TLS-1-2-2017-01",
Certificates=[{"CertificateArn": "lalala", "IsDefault": True}],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
err = exc.value.response["Error"]
err["Message"].should.equal("Certificate lalala not found")
# Unknown protocol
with pytest.raises(ClientError) as exc:
client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol="HTP",
SslPolicy="ELBSecurityPolicy-TLS-1-2-2017-01",
Certificates=[{"CertificateArn": yahoo_arn, "IsDefault": True}],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
err = exc.value.response["Error"]
err["Message"].should.equal("Protocol HTP is not supported")
@mock_acm
@mock_ec2
@mock_elbv2
def test_modify_listener_of_https_target_group():
# Verify we can add a listener for a TargetGroup that is already HTTPS
client = boto3.client("elbv2", region_name="eu-central-1")
acm = boto3.client("acm", region_name="eu-central-1")
ec2 = boto3.resource("ec2", region_name="eu-central-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="eu-central-1a"
)
response = client.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[{"Key": "key_name", "Value": "a_value"}],
)
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
response = client.create_target_group(
Name="a-target", Protocol="HTTPS", Port=8443, VpcId=vpc.id,
)
target_group = response.get("TargetGroups")[0]
target_group_arn = target_group["TargetGroupArn"]
# HTTPS listener
response = acm.request_certificate(
DomainName="google.com", SubjectAlternativeNames=["google.com"],
)
google_arn = response["CertificateArn"]
response = client.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTPS",
Port=443,
Certificates=[{"CertificateArn": google_arn}],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)
listener_arn = response["Listeners"][0]["ListenerArn"]
# Now modify the HTTPS listener with a different certificate
response = acm.request_certificate(
DomainName="yahoo.com", SubjectAlternativeNames=["yahoo.com"],
)
yahoo_arn = response["CertificateArn"]
listener = client.modify_listener(
ListenerArn=listener_arn,
Certificates=[{"CertificateArn": yahoo_arn,},],
DefaultActions=[{"Type": "forward", "TargetGroupArn": target_group_arn}],
)["Listeners"][0]
listener["Certificates"].should.equal([{"CertificateArn": yahoo_arn}])
listener = client.describe_listeners(ListenerArns=[listener_arn])["Listeners"][0]
listener["Certificates"].should.equal([{"CertificateArn": yahoo_arn}])
@mock_elbv2
@mock_ec2
def test_redirect_action_listener_rule():
response, _, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
action = {
"Type": "redirect",
"RedirectConfig": {
"Protocol": "HTTPS",
"Port": "443",
"StatusCode": "HTTP_301",
},
}
response = conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[action],
)
listener = response.get("Listeners")[0]
expected_default_actions = [
{
"Type": "redirect",
"RedirectConfig": {
"Protocol": "HTTPS",
"Port": "443",
"StatusCode": "HTTP_301",
},
}
]
listener.get("DefaultActions").should.equal(expected_default_actions)
listener_arn = listener.get("ListenerArn")
conn.create_rule(
ListenerArn=listener_arn,
Conditions=[{"Field": "path-pattern", "Values": ["/*"]},],
Priority=3,
Actions=[action],
)
describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
describe_rules_response["Rules"][0]["Actions"].should.equal(
expected_default_actions
)
describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn])
describe_listener_actions = describe_listener_response["Listeners"][0][
"DefaultActions"
]
describe_listener_actions.should.equal(expected_default_actions)
modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81)
modify_listener_actions = modify_listener_response["Listeners"][0]["DefaultActions"]
modify_listener_actions.should.equal(expected_default_actions)
@mock_elbv2
@mock_ec2
def test_cognito_action_listener_rule():
response, _, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
action = {
"Type": "authenticate-cognito",
"AuthenticateCognitoConfig": {
"UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format(
ACCOUNT_ID
),
"UserPoolClientId": "abcd1234abcd",
"UserPoolDomain": "testpool",
},
}
response = conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[action],
)
listener = response.get("Listeners")[0]
listener.get("DefaultActions")[0].should.equal(action)
listener_arn = listener.get("ListenerArn")
conn.create_rule(
ListenerArn=listener_arn,
Conditions=[{"Field": "path-pattern", "Values": ["/*"]},],
Priority=3,
Actions=[action],
)
describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
describe_rules_response["Rules"][0]["Actions"][0].should.equal(action)
describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn])
describe_listener_actions = describe_listener_response["Listeners"][0][
"DefaultActions"
][0]
describe_listener_actions.should.equal(action)
@mock_elbv2
@mock_ec2
def test_fixed_response_action_listener_rule():
response, _, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
action = {
"Type": "fixed-response",
"FixedResponseConfig": {
"ContentType": "text/plain",
"MessageBody": "This page does not exist",
"StatusCode": "404",
},
}
response = conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[action],
)
listener = response.get("Listeners")[0]
listener.get("DefaultActions")[0].should.equal(action)
listener_arn = listener.get("ListenerArn")
conn.create_rule(
ListenerArn=listener_arn,
Conditions=[{"Field": "path-pattern", "Values": ["/*"]},],
Priority=3,
Actions=[action],
)
describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
describe_rules_response["Rules"][0]["Actions"][0].should.equal(action)
describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn])
describe_listener_actions = describe_listener_response["Listeners"][0][
"DefaultActions"
][0]
describe_listener_actions.should.equal(action)
@mock_elbv2
@mock_ec2
def test_fixed_response_action_listener_rule_validates_status_code():
response, _, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
invalid_status_code_action = {
"Type": "fixed-response",
"FixedResponseConfig": {
"ContentType": "text/plain",
"MessageBody": "This page does not exist",
"StatusCode": "100",
},
}
with pytest.raises(ClientError) as invalid_status_code_exception:
conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[invalid_status_code_action],
)
invalid_status_code_exception.value.response["Error"]["Code"].should.equal(
"ValidationError"
)
@mock_elbv2
@mock_ec2
def test_fixed_response_action_listener_rule_validates_content_type():
response, _, _, _, _, conn = create_load_balancer()
load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn")
invalid_content_type_action = {
"Type": "fixed-response",
"FixedResponseConfig": {
"ContentType": "Fake content type",
"MessageBody": "This page does not exist",
"StatusCode": "200",
},
}
with pytest.raises(ClientError) as invalid_content_type_exception:
conn.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol="HTTP",
Port=80,
DefaultActions=[invalid_content_type_action],
)
invalid_content_type_exception.value.response["Error"]["Code"].should.equal(
"InvalidLoadBalancerAction"
)
|
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from rest_framework.exceptions import ValidationError
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website.models import Node, User, Comment
from website.exceptions import NodeStateError
from website.util import permissions as osf_permissions
from api.base.utils import get_object_or_error, absolute_reverse
from api.base.serializers import (JSONAPISerializer, WaterbutlerLink, NodeFileHyperLinkField, IDField, TypeField,
TargetTypeField, JSONAPIListField, LinksField, RelationshipField, DevOnly,
HideIfRegistration)
from api.base.exceptions import InvalidModelValueError
class NodeTagField(ser.Field):
def to_representation(self, obj):
if obj is not None:
return obj._id
return None
def to_internal_value(self, data):
return data
class NodeSerializer(JSONAPISerializer):
# TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it
# handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this
# instance
filterable_fields = frozenset([
'id',
'title',
'description',
'public',
'tags',
'category',
'date_created',
'date_modified',
'registration',
'root',
'parent'
])
non_anonymized_fields = [
'id',
'title',
'description',
'category',
'date_created',
'date_modified',
'registration',
'tags',
'public',
'links',
'children',
'comments',
'contributors',
'files',
'node_links',
'parent',
'root',
'logs',
]
id = IDField(source='_id', read_only=True)
type = TypeField()
category_choices = Node.CATEGORY_MAP.keys()
category_choices_string = ', '.join(["'{}'".format(choice) for choice in category_choices])
title = ser.CharField(required=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(choices=category_choices, help_text="Choices: " + category_choices_string)
date_created = ser.DateTimeField(read_only=True)
date_modified = ser.DateTimeField(read_only=True)
registration = ser.BooleanField(read_only=True, source='is_registration')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = DevOnly(ser.BooleanField(read_only=True, source='is_folder'))
dashboard = ser.BooleanField(read_only=True, source='is_dashboard')
tags = JSONAPIListField(child=NodeTagField(), required=False)
# Public is only write-able by admins--see update method
public = ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes')
links = LinksField({'html': 'get_absolute_url'})
# TODO: When we have osf_permissions.ADMIN permissions, make this writable for admins
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'})
contributors = RelationshipField(
related_view='nodes:node-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'},
)
files = RelationshipField(
related_view='nodes:node-providers',
related_view_kwargs={'node_id': '<pk>'}
)
forked_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
)
node_links = DevOnly(RelationshipField(
related_view='nodes:node-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'},
))
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
registrations = DevOnly(HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_registration_count'}
)))
root = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<root._id>'}
)
logs = RelationshipField(
related_view='nodes:node-logs',
related_view_kwargs={'node_id': '<pk>'},
)
class Meta:
type_ = 'nodes'
def get_absolute_url(self, obj):
return obj.absolute_url
# TODO: See if we can get the count filters into the filter rather than the serializer.
def get_user_auth(self, request):
user = request.user
if user.is_anonymous():
auth = Auth(None)
else:
auth = Auth(user)
return auth
def get_node_count(self, obj):
auth = self.get_user_auth(self.context['request'])
nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary and not node.is_deleted]
return len(nodes)
def get_contrib_count(self, obj):
return len(obj.contributors)
def get_registration_count(self, obj):
auth = self.get_user_auth(self.context['request'])
registrations = [node for node in obj.node__registrations if node.can_view(auth)]
return len(registrations)
def get_pointers_count(self, obj):
return len(obj.nodes_pointer)
def get_unread_comments_count(self, obj):
auth = self.get_user_auth(self.context['request'])
user = auth.user
return Comment.find_unread(user=user, node=obj)
def create(self, validated_data):
node = Node(**validated_data)
try:
node.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return node
def update(self, node, validated_data):
"""Update instance with the validated data. Requires
the request to be in the serializer context.
"""
assert isinstance(node, Node), 'node must be a Node'
auth = self.get_user_auth(self.context['request'])
old_tags = set([tag._id for tag in node.tags])
if 'tags' in validated_data:
current_tags = set(validated_data.get('tags'))
del validated_data['tags']
elif self.partial:
current_tags = set(old_tags)
else:
current_tags = set()
for new_tag in (current_tags - old_tags):
node.add_tag(new_tag, auth=auth)
for deleted_tag in (old_tags - current_tags):
node.remove_tag(deleted_tag, auth=auth)
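# Example of the tag diff above (a hedged sketch with hypothetical values): if
# old_tags is {'a', 'b'} and a non-partial update sends tags=['b', 'c'], then
# 'c' is added via add_tag, 'a' is removed via remove_tag, and 'b' is untouched.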
if validated_data:
try:
node.update(validated_data, auth=auth)
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except PermissionsError:
raise exceptions.PermissionDenied
return node
class NodeDetailSerializer(NodeSerializer):
"""
Overrides NodeSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class NodeContributorsSerializer(JSONAPISerializer):
""" Separate from UserSerializer due to necessity to override almost every field as read only
"""
non_anonymized_fields = ['bibliographic', 'permission']
filterable_fields = frozenset([
'id',
'bibliographic',
'permission'
])
id = IDField(source='_id', required=True)
type = TypeField()
bibliographic = ser.BooleanField(help_text='Whether the user will be included in citations for this node or not.',
default=True)
permission = ser.ChoiceField(choices=osf_permissions.PERMISSIONS, required=False, allow_null=True,
default=osf_permissions.reduce_permissions(osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS),
help_text='User permission level. Must be "read", "write", or "admin". Defaults to "write".')
links = LinksField({
'self': 'get_absolute_url'
})
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'contributors'
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
class NodeContributorsCreateSerializer(NodeContributorsSerializer):
"""
Overrides NodeContributorsSerializer to add target_type field
"""
target_type = TargetTypeField(target_type='users')
def create(self, validated_data):
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
contributor = get_object_or_error(User, validated_data['_id'], display_name='user')
# Node object checks for contributor existence but can still change permissions anyway
if contributor in node.contributors:
raise exceptions.ValidationError('{} is already a contributor'.format(contributor.fullname))
bibliographic = validated_data['bibliographic']
permissions = osf_permissions.expand_permissions(validated_data.get('permission')) or osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS
node.add_contributor(contributor=contributor, auth=auth, visible=bibliographic, permissions=permissions, save=True)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeContributorDetailSerializer(NodeContributorsSerializer):
"""
Overrides node contributor serializer to add additional methods
"""
def update(self, instance, validated_data):
contributor = instance
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
visible = validated_data.get('bibliographic')
permission = validated_data.get('permission')
try:
node.update_contributor(contributor, permission, visible, auth, save=True)
except NodeStateError as e:
raise exceptions.ValidationError(e)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeLinksSerializer(JSONAPISerializer):
id = IDField(source='_id')
type = TypeField()
target_type = TargetTypeField(target_type='nodes')
# TODO: We don't show the title because the current user may not have access to this node. We may want to conditionally
# include this field in the future.
# title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this Node Link '
# 'points to')
target_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'node_links'
links = LinksField({
'self': 'get_absolute_url'
})
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
def create(self, validated_data):
request = self.context['request']
user = request.user
auth = Auth(user)
node = self.context['view'].get_node()
target_node_id = validated_data['_id']
pointer_node = Node.load(target_node_id)
if not pointer_node or pointer_node.is_folder:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' not found.'.format(target_node_id)
)
try:
pointer = node.add_pointer(pointer_node, auth, save=True)
return pointer
except ValueError:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, node._id)
)
def update(self, instance, validated_data):
pass
class NodeProviderSerializer(JSONAPISerializer):
id = ser.SerializerMethodField(read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
node = ser.CharField(source='node_id', read_only=True)
provider = ser.CharField(read_only=True)
files = NodeFileHyperLinkField(
related_view='nodes:node-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
links = LinksField({
'upload': WaterbutlerLink(),
'new_folder': WaterbutlerLink(kind='folder')
})
class Meta:
type_ = 'files'
@staticmethod
def get_id(obj):
return '{}:{}'.format(obj.node._id, obj.provider)
class NodeAlternativeCitationSerializer(JSONAPISerializer):
id = IDField(source="_id", read_only=True)
type = TypeField()
name = ser.CharField(required=True)
text = ser.CharField(required=True)
class Meta:
type_ = 'citations'
def create(self, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
citation = node.add_citation(auth, save=True, **validated_data)
return citation
def update(self, instance, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
instance = node.edit_citation(auth, instance, save=True, **validated_data)
return instance
def error_checker(self, data):
errors = []
name = data.get('name', None)
text = data.get('text', None)
citations = self.context['view'].get_node().alternative_citations
if not (self.instance and self.instance.name == name) and citations.find(Q('name', 'eq', name)).count() > 0:
errors.append("There is already a citation named '{}'".format(name))
if not (self.instance and self.instance.text == text):
matching_citations = citations.find(Q('text', 'eq', text))
if matching_citations.count() > 0:
names = "', '".join([str(citation.name) for citation in matching_citations])
errors.append("Citation matches '{}'".format(names))
return errors
|
|
# ***** BEGIN LICENSE BLOCK *****
# Sconspiracy - Copyright (C) IRCAD, 2004-2010.
# Distributed under the terms of the BSD Licence as
# published by the Open Source Initiative.
# ****** END LICENSE BLOCK ******
import os
import racy
import racy.rlog as rlog
import racy.rplugins as rplug
from racy.renv import constants
from racy.renv.options import get_option
from racy.rproject.project import ConstructibleRacyProject, \
InstallableRacyProject
from racy.rutils import memoize, remove_vcs_dirs, vcs, Version
all = ['RacyProjectsDB']
@memoize
def find_files(root, directory, filename):
"""Find recursively 'filename' in 'root' and returns a list
with paths of found files if these files are in a directory 'directory'.
Ignore VCS dirs.
The 'directory' is filtered to minimize the number of walked directories.
"""
path_list = []
walker = os.walk(root, followlinks=True)
for root, dirs, files in walker:
remove_vcs_dirs(dirs)
# if dirs contain 'directory', don't walk the others
if directory in dirs: dirs[:] = [directory]
if root.endswith(os.path.sep + directory):
if filename in files:
path_list.append(os.path.join(root, filename))
dirs[:] = []
return path_list
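# Illustrative call (a hedged sketch; the paths are hypothetical):
#     find_files('/src/projects', 'bin', 'prod.opts')
# might return ['/src/projects/mylib/bin/prod.opts']. Whenever a level contains
# a 'bin' entry only that branch is walked further, and once a match is found
# inside a 'bin' directory the walk stops descending there.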
@memoize
def find_files_in_dirs(directory_list, filename):
"""Specialized version of find_files, lookup in multiple directories and
filter dirs with constants.PRJ_OPT_DIR"""
path_list = []
for directory in directory_list :
path_list += find_files(
directory,
constants.PRJ_OPT_DIR,
filename
)
return path_list
class RacyProjectsDB(object):
"""Build a map of all build options in a directory list"""
current_db = None
def __init__(self, directory_list=[], env={},
prj_file=constants.OPTS_FILE ):
from collections import defaultdict
self.prj_args = {}
self.deps_db = defaultdict(dict)
self.src_lib_deps = {}
self.bin_lib_deps = {}
self.installed_libext = []
self._prj_map = {}
self._prj_aliases = {}
cc_version = env['TOOLINFO']['VERSION'].replace('.','-')
cxx = env['TOOLINFO']['NAME'] + cc_version
self.prj_args['platform'] = get_option('PLATFORM')
self.prj_args['debug'] = get_option('DEBUG')
self.prj_args['env'] = env
self.prj_args['cxx'] = Version(cxx)
self.prj_args['projects_db'] = self
if not directory_list:
directory_list = racy.renv.dirs.code
vcs.init_repo_informations(directory_list)
self.prj_path_list = find_files_in_dirs(directory_list, prj_file)
for f in self.prj_path_list:
self._register_prj_from_file(f)
RacyProjectsDB.current_db = self
def __iter__(self):
return self._prj_map.__iter__()
def __getitem__(self, item):
res = None
name = getattr(item, 'register_name', item)
name = self._prj_aliases.get(name, name)
res = self._prj_map[name]
return res
def has_key(self, key):
return self._prj_map.has_key(key)
def __len__(self):
return self._prj_map.__len__()
def get_additive_projects(self, prj):
get_adds = rplug.register.get_additive_projects
deps = (prj,) + prj.source_rec_deps
res = [p for dep in deps for p in get_adds(dep)]
return res
def register_prj(self, prj, raise_exception=True):
if prj.register_name in self._prj_map:
prev_prj = self._prj_map[prj.register_name]
msg = """An existing project is already named <{0.register_name}>.
defined here : {1.opts_source}
redefined here : {0.opts_source}
""".format(prj, prev_prj)
if raise_exception:
raise RacyProjectError(prj, msg)
else:
racy.print_warning( 'Project {0.full_name}'.format(prj), msg)
self._prj_map[prj.register_name] = prj
if prj.name != prj.register_name:
self._prj_aliases[prj.name] = prj.register_name
@memoize
def make_prjs_from(self, source, args = {},
factory = ConstructibleRacyProject ):
kwargs = {}
kwargs.update(self.prj_args)
kwargs.update(args)
prj = [factory(build_options=source, **kwargs)]
replacement = rplug.register.get_replacement_projects(*prj)
if replacement:
prj = replacement
return prj
@memoize
def make_prj_from_libext(self, libext):
return self.make_prjs_from(
libext._project_source,
args = {'prj_path' : libext._src},
factory=InstallableRacyProject)
def _register_prj_from_file(self, file):
name = file.split(os.sep)[-3]
target = racy.renv.TARGETS.get(name)
args = {}
for el in target.args:
if el.startswith("@") :
args['config'] = config = el[1:]
if args.get('config'):
target.name = '_'.join([target.name, config])
prjs = self.make_prjs_from(file, args)
for prj in prjs:
self.register_prj(prj, raise_exception=False)
def target_lookup(self, name, **kw):
try:
target = racy.renv.TARGETS.get(name)
db = self
if target.name and db.has_key(target.name):
racy.print_msg('Target : ' + name)
rlog.info.log("Target", name)
prj = db[target.name]
to_build = [prj]
to_build += db.get_additive_projects(prj)
res = []
for p in to_build:
builddeps = racy.rutils.is_true(p.get('BUILDDEPS'))
buildpkg = racy.rutils.is_true(p.get('BUILDPKG'))
opts = ['rc']
opts += ['deps'] if builddeps else []
opts += ['pkg'] if buildpkg else []
res += p.install(opts = opts)
pack = []
#pack = prj.env.Package(
##source=res,
#target="/tmp/"+prj.full_name+".zip",
#PACKAGETYPE='zip',
#NAME=prj.name,
#VERSION=prj.version,
#LICENCE='bsd',
#DESCRIPTION='fwCore lib',
#SUMMARY='f4s basics',
#VENDOR='IRCAD',
#X_MSI_LANGUAGE = '1033',
#)
res = prj.env.Alias(prj.full_name, res+pack)
return res[0]
elif target.name:
msg = 'Unknown target : {trg.name}'.format(trg = target)
racy.print_warning('Unknown target', msg)
except racy.RacyException, e:
racy.manage_exception(e)
exit(1)
|
|
import pandas as pd
from pandas import DataFrame
import math
import numpy as np
from .utils import *
from .functions import *
from .parameters import Parameters
from .records import Records
import copy
all_cols = set()
def add_df(alldfs, df):
for col in df.columns:
if col not in all_cols:
all_cols.add(col)
alldfs.append(df[col])
else:
dup_index = [i for i,
series in enumerate(alldfs) if series.name == col][0]
alldfs[dup_index] = df[col]
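# Illustrative behaviour (hypothetical frames): if two DataFrames passed to
# add_df both carry a column named 'c00100', the series from the frame added
# last replaces the earlier one inside `alldfs` instead of being appended twice.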
def calculator(params, records, mods="", **kwargs):
update_mods = {}
if mods:
if isinstance(mods, str):
import json
dd = json.loads(mods)
dd = {int(k): (np.array(v) if type(v) == list else v)
for k, v in dd.items()}
update_mods.update(dd)
else:
update_mods.update(mods)
final_mods = toolz.merge_with(toolz.merge, update_mods,
{params.current_year: kwargs})
params.implement_reform(final_mods)
if final_mods:
max_yr = max(yr for yr in final_mods)
else:
max_yr = 0
if (params.current_year < max_yr):
msg = ("Modifications are for year {0} and Parameters are for"
" year {1}. Parameters will be advanced to year {0}")
print(msg.format(max_yr, params.current_year))
while params.current_year < max_yr:
params.set_year(params.current_year + 1)
if (records.current_year < max_yr):
msg = ("Modifications are for year {0} and Records are for"
" year {1}. Records will be advanced to year {0}")
print(msg.format(max_yr, records.current_year))
while records.current_year < max_yr:
records.increment_year()
calc = Calculator(params, records)
return calc
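# Illustrative use (a hedged sketch; the reform parameter name and value are
# hypothetical):
#     params = Parameters()
#     recs = Records.from_file("puf.csv")  # hypothetical input file
#     calc = calculator(params, recs, mods='{"2016": {"_some_param": [4200]}}')
# The JSON string is decoded, list values become numpy arrays, the reform is
# applied via implement_reform, and both params and records are advanced to the
# latest reform year before the Calculator is returned.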
class Calculator(object):
def __init__(self, params=None, records=None, sync_years=True, **kwargs):
if isinstance(params, Parameters):
self._params = params
else:
msg = 'Must supply tax parameters as a Parameters object'
raise ValueError(msg)
if isinstance(records, Records):
self._records = records
elif isinstance(records, str):
self._records = Records.from_file(records, **kwargs)
else:
msg = 'Must supply tax records as a file path or Records object'
raise ValueError(msg)
if sync_years and self._records.current_year == 2008:
print("You loaded data for " +
str(self._records.current_year) + '.')
while self._records.current_year < self._params.current_year:
self._records.increment_year()
print("Your data have beeen extrapolated to " +
str(self._records.current_year) + ".")
assert self._params.current_year == self._records.current_year
@property
def params(self):
return self._params
@property
def records(self):
return self._records
def calc_all(self):
FilingStatus(self.params, self.records)
Adj(self.params, self.records)
CapGains(self.params, self.records)
SSBenefits(self.params, self.records)
AGI(self.params, self.records)
ItemDed(self.params, self.records)
EI_FICA(self.params, self.records)
AMED(self.params, self.records)
StdDed(self.params, self.records)
XYZD(self.params, self.records)
NonGain(self.params, self.records)
TaxGains(self.params, self.records)
MUI(self.params, self.records)
AMTI(self.params, self.records)
F2441(self.params, self.records)
DepCareBen(self.params, self.records)
ExpEarnedInc(self.params, self.records)
RateRed(self.params, self.records)
NumDep(self.params, self.records)
ChildTaxCredit(self.params, self.records)
AmOppCr(self.params, self.records)
LLC(self.params, self.records)
RefAmOpp(self.params, self.records)
NonEdCr(self.params, self.records)
AddCTC(self.params, self.records)
F5405(self.params, self.records)
C1040(self.params, self.records)
DEITC(self.params, self.records)
OSPC_TAX(self.params, self.records)
ExpandIncome(self.params, self.records)
def calc_all_test(self):
all_dfs = []
add_df(all_dfs, FilingStatus(self.params, self.records))
add_df(all_dfs, Adj(self.params, self.records))
add_df(all_dfs, CapGains(self.params, self.records))
add_df(all_dfs, SSBenefits(self.params, self.records))
add_df(all_dfs, AGI(self.params, self.records))
add_df(all_dfs, ItemDed(self.params, self.records))
add_df(all_dfs, EI_FICA(self.params, self.records))
add_df(all_dfs, AMED(self.params, self.records))
add_df(all_dfs, StdDed(self.params, self.records))
add_df(all_dfs, XYZD(self.params, self.records))
add_df(all_dfs, NonGain(self.params, self.records))
add_df(all_dfs, TaxGains(self.params, self.records))
add_df(all_dfs, MUI(self.params, self.records))
add_df(all_dfs, AMTI(self.params, self.records))
add_df(all_dfs, F2441(self.params, self.records))
add_df(all_dfs, DepCareBen(self.params, self.records))
add_df(all_dfs, ExpEarnedInc(self.params, self.records))
add_df(all_dfs, RateRed(self.params, self.records))
add_df(all_dfs, NumDep(self.params, self.records))
add_df(all_dfs, ChildTaxCredit(self.params, self.records))
add_df(all_dfs, AmOppCr(self.params, self.records))
add_df(all_dfs, LLC(self.params, self.records))
add_df(all_dfs, RefAmOpp(self.params, self.records))
add_df(all_dfs, NonEdCr(self.params, self.records))
add_df(all_dfs, AddCTC(self.params, self.records))
add_df(all_dfs, F5405(self.params, self.records))
add_df(all_dfs, C1040(self.params, self.records))
add_df(all_dfs, DEITC(self.params, self.records))
add_df(all_dfs, OSPC_TAX(self.params, self.records))
add_df(all_dfs, ExpandIncome(self.params, self.records))
totaldf = pd.concat(all_dfs, axis=1)
return totaldf
def increment_year(self):
self.records.increment_year()
self.params.set_year(self.params.current_year + 1)
@property
def current_year(self):
return self.params.current_year
def mtr(self, income_type_string, diff=100):
"""
This method calculates the marginal tax rate for every record.
In order to avoid kinks, we find the marginal rates associated with
both a tax increase and a tax decrease and use the more modest of
the two.
"""
income_type = getattr(self, income_type_string)
# Calculate the base level of taxes.
self.calc_all()
taxes_base = np.copy(self._ospctax)
# Calculate the tax change with a marginal increase in income.
setattr(self, income_type_string, income_type + diff)
self.calc_all()
delta_taxes_up = self._ospctax - taxes_base
# Calculate the tax change with a marginal decrease in income.
setattr(self, income_type_string, income_type - diff)
self.calc_all()
delta_taxes_down = taxes_base - self._ospctax
# Reset the income_type to its starting point to avoid
# unintended consequences.
setattr(self, income_type_string, income_type)
self.calc_all()
# Choose the more modest effect of either adding or subtracting income
delta_taxes = np.where(np.absolute(delta_taxes_up) <=
np.absolute(delta_taxes_down),
delta_taxes_up, delta_taxes_down)
# Calculate the marginal tax rate
mtr = delta_taxes / diff
return mtr
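# Worked example (hypothetical numbers): with diff=100, if adding $100 of the
# chosen income type raises _ospctax by $28 for a record while subtracting $100
# lowers it by $32, then delta_taxes_up=28 and delta_taxes_down=32; the
# smaller-magnitude change is kept, giving mtr = 28 / 100 = 0.28 for that record.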
def diagnostic_table(self, num_years=5):
table = []
row_years = []
calc = copy.deepcopy(self)
for i in range(0, num_years):
calc.calc_all()
row_years.append(calc.params._current_year)
# total number of records
returns = calc.records.s006.sum()
# AGI
agi = (calc.records.c00100 * calc.records.s006).sum()
# number of itemizers
ID1 = calc.records.c04470 * calc.records.s006
STD1 = calc.records._standard * calc.records.s006
deduction = np.maximum(calc.records.c04470, calc.records._standard)
# S TD1 = (calc.c04100 + calc.c04200)*calc.s006
NumItemizer1 = (calc.records.s006[(calc.records.c04470 > 0) *
(calc.records.c00100 > 0)].sum())
# itemized deduction
ID = ID1[calc.records.c04470 > 0].sum()
NumSTD = calc.records.s006[(calc.records._standard > 0) *
(calc.records.c00100 > 0)].sum()
# standard deduction
STD = STD1[(calc.records._standard > 0) *
(calc.records.c00100 > 0)].sum()
# personal exemption
PE = (calc.records.c04600 *
calc.records.s006)[calc.records.c00100 > 0].sum()
# taxable income
taxinc = (calc.records.c04800 * calc.records.s006).sum()
# regular tax
regular_tax = (calc.records.c05200 * calc.records.s006).sum()
# AMT income
AMTI = (calc.records.c62100 * calc.records.s006).sum()
# total AMTs
AMT = (calc.records.c09600 * calc.records.s006).sum()
# number of people paying AMT
NumAMT1 = calc.records.s006[calc.records.c09600 > 0].sum()
# tax before credits
tax_bf_credits = (calc.records.c05800 * calc.records.s006).sum()
# tax before nonrefundable credits 09200
tax_bf_nonrefundable = (calc.records.c09200 *
calc.records.s006).sum()
# refundable credits
refundable = (calc.records._refund * calc.records.s006).sum()
# nonrefundable credits
nonrefundable = (calc.records.c07100 * calc.records.s006).sum()
# ospc_tax
revenue1 = (calc.records._ospctax * calc.records.s006).sum()
table.append([returns / math.pow(10, 6), agi / math.pow(10, 9),
NumItemizer1 / math.pow(10, 6), ID / math.pow(10, 9),
NumSTD / math.pow(10, 6), STD / math.pow(10, 9),
PE / math.pow(10, 9), taxinc / math.pow(10, 9),
regular_tax / math.pow(10, 9),
AMTI / math.pow(10, 9), AMT / math.pow(10, 9),
NumAMT1 / math.pow(10, 6),
tax_bf_credits / math.pow(10, 9),
refundable / math.pow(10, 9),
nonrefundable / math.pow(10, 9),
revenue1 / math.pow(10, 9)])
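# Weighted aggregates are rescaled before display: dividing by math.pow(10, 6)
# puts counts in millions (#m) and dividing by math.pow(10, 9) puts dollar
# totals in billions ($b), matching the column labels below.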
calc.increment_year()
df = DataFrame(table, row_years,
["Returns (#m)", "AGI ($b)", "Itemizers (#m)",
"Itemized Deduction ($b)",
"Standard Deduction Filers (#m)",
"Standard Deduction ($b)", "Personal Exemption ($b)",
"Taxable income ($b)", "Regular Tax ($b)",
"AMT income ($b)", "AMT amount ($b)",
"AMT number (#m)", "Tax before credits ($b)",
"refundable credits ($b)",
"nonrefundable credits ($b)",
"ospctax ($b)"])
df = df.transpose()
pd.options.display.float_format = '{:8,.1f}'.format
return df
|
|
#!/usr/bin/env python
'''
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
'''
VOID, UNSIGNED, SIGNED, FIXED, FLOAT = range(5)
SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_0, SWIZZLE_1, SWIZZLE_NONE, = range(7)
PLAIN = 'plain'
RGB = 'rgb'
SRGB = 'srgb'
YUV = 'yuv'
ZS = 'zs'
def is_pot(x):
return (x & (x - 1)) == 0
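# e.g. is_pot(64) is True and is_pot(24) is False; note the bit trick also
# reports True for 0.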
VERY_LARGE = 99999999999999999999999
class Channel:
'''Describe the channel of a color channel.'''
def __init__(self, type, norm, pure, size, name = ''):
self.type = type
self.norm = norm
self.pure = pure
self.size = size
self.sign = type in (SIGNED, FIXED, FLOAT)
self.name = name
def __str__(self):
s = str(self.type)
if self.norm:
s += 'n'
if self.pure:
s += 'p'
s += str(self.size)
return s
def __eq__(self, other):
return self.type == other.type and self.norm == other.norm and self.pure == other.pure and self.size == other.size
def max(self):
'''Maximum representable number.'''
if self.type == FLOAT:
return VERY_LARGE
if self.type == FIXED:
return (1 << (self.size/2)) - 1
if self.norm:
return 1
if self.type == UNSIGNED:
return (1 << self.size) - 1
if self.type == SIGNED:
return (1 << (self.size - 1)) - 1
assert False
def min(self):
'''Minimum representable number.'''
if self.type == FLOAT:
return -VERY_LARGE
if self.type == FIXED:
return -(1 << (self.size/2))
if self.type == UNSIGNED:
return 0
if self.norm:
return -1
if self.type == SIGNED:
return -(1 << (self.size - 1))
assert False
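# Examples following the formulas above: an un-normalized UNSIGNED 8-bit
# channel spans [0, 255] and a SIGNED 8-bit channel spans [-128, 127]; with
# norm set the ranges collapse to [0, 1] and [-1, 1] respectively, while FLOAT
# channels use the VERY_LARGE sentinel for both extremes.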
class Format:
'''Describe a pixel format.'''
def __init__(self, name, layout, block_width, block_height, channels, swizzles, colorspace):
self.name = name
self.layout = layout
self.block_width = block_width
self.block_height = block_height
self.channels = channels
self.swizzles = swizzles
self.name = name
self.colorspace = colorspace
def __str__(self):
return self.name
def short_name(self):
'''Make up a short name for a format, suitable to be used as a suffix in
function names.'''
name = self.name
if name.startswith('PIPE_FORMAT_'):
name = name[len('PIPE_FORMAT_'):]
name = name.lower()
return name
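# e.g. a format named 'PIPE_FORMAT_B8G8R8A8_UNORM' gets the short name
# 'b8g8r8a8_unorm', the lower-cased suffix used in generated function names.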
def block_size(self):
size = 0
for channel in self.channels:
size += channel.size
return size
def nr_channels(self):
nr_channels = 0
for channel in self.channels:
if channel.size:
nr_channels += 1
return nr_channels
def is_array(self):
if self.layout != PLAIN:
return False
ref_channel = self.channels[0]
for channel in self.channels[1:]:
if channel.size and (channel.size != ref_channel.size or channel.size % 8):
return False
return True
def is_mixed(self):
if self.layout != PLAIN:
return False
ref_channel = self.channels[0]
if ref_channel.type == VOID:
ref_channel = self.channels[1]
for channel in self.channels[1:]:
if channel.type != VOID:
if channel.type != ref_channel.type:
return True
if channel.norm != ref_channel.norm:
return True
if channel.pure != ref_channel.pure:
return True
return False
def is_pot(self):
return is_pot(self.block_size())
def is_int(self):
if self.layout != PLAIN:
return False
for channel in self.channels:
if channel.type not in (VOID, UNSIGNED, SIGNED):
return False
return True
def is_float(self):
if self.layout != PLAIN:
return False
for channel in self.channels:
if channel.type not in (VOID, FLOAT):
return False
return True
def is_bitmask(self):
if self.layout != PLAIN:
return False
if self.block_size() not in (8, 16, 32):
return False
for channel in self.channels:
if channel.type not in (VOID, UNSIGNED, SIGNED):
return False
return True
def inv_swizzles(self):
'''Return an array[4] of inverse swizzle terms.
Only the first matching value is picked, to avoid l8 getting blue and i8 getting alpha.'''
inv_swizzle = [None]*4
for i in range(4):
swizzle = self.swizzles[i]
if swizzle < 4 and inv_swizzle[swizzle] == None:
inv_swizzle[swizzle] = i
return inv_swizzle
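# Example: swizzles (SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_W), as used by a
# luminance-alpha style layout, invert to [0, None, None, 3] because only the
# first channel mapping to each rgba slot is kept.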
def stride(self):
return self.block_size()/8
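# Example: a plain format built from four un8 channels has block_size() == 32
# bits and stride() == 4 bytes.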
_type_parse_map = {
'': VOID,
'x': VOID,
'u': UNSIGNED,
's': SIGNED,
'h': FIXED,
'f': FLOAT,
}
_swizzle_parse_map = {
'x': SWIZZLE_X,
'y': SWIZZLE_Y,
'z': SWIZZLE_Z,
'w': SWIZZLE_W,
'0': SWIZZLE_0,
'1': SWIZZLE_1,
'_': SWIZZLE_NONE,
}
def parse(filename):
'''Parse the format description CSV file in terms of the
Channel and Format classes above.'''
stream = open(filename)
formats = []
for line in stream:
try:
comment = line.index('#')
except ValueError:
pass
else:
line = line[:comment]
line = line.strip()
if not line:
continue
fields = [field.strip() for field in line.split(',')]
name = fields[0]
layout = fields[1]
block_width, block_height = map(int, fields[2:4])
swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
colorspace = fields[9]
if layout == PLAIN:
names = ['']*4
if colorspace in (RGB, SRGB):
for i in range(4):
swizzle = swizzles[i]
if swizzle < 4:
names[swizzle] += 'rgba'[i]
elif colorspace == ZS:
for i in range(4):
swizzle = swizzles[i]
if swizzle < 4:
names[swizzle] += 'zs'[i]
else:
assert False
for i in range(4):
if names[i] == '':
names[i] = 'x'
else:
names = ['x', 'y', 'z', 'w']
channels = []
for i in range(0, 4):
field = fields[4 + i]
if field:
type = _type_parse_map[field[0]]
if field[1] == 'n':
norm = True
pure = False
size = int(field[2:])
elif field[1] == 'p':
pure = True
norm = False
size = int(field[2:])
else:
norm = False
pure = False
size = int(field[1:])
else:
type = VOID
norm = False
pure = False
size = 0
channel = Channel(type, norm, pure, size, names[i])
channels.append(channel)
format = Format(name, layout, block_width, block_height, channels, swizzles, colorspace)
formats.append(format)
return formats
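# A hypothetical CSV row in the shape this parser expects (name, layout, block
# width/height, four channel descriptors, swizzles, colorspace):
#     PIPE_FORMAT_B8G8R8A8_UNORM, plain, 1, 1, un8, un8, un8, un8, zyxw, rgb
# would yield four normalized unsigned 8-bit channels with a 'zyxw' swizzle.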
|
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Oliver Clements <olcl@pml.ac.uk>
#
# Contact email: olcl@pml.ac.uk
# =============================================================================
# !!! NOTE: Does not conform to new interfaces yet #################
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.ows import (
OwsCommon,
ServiceIdentification,
ServiceProvider,
OperationsMetadata,
)
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import dateutil.parser as parser
from datetime import timedelta
import logging
from owslib.util import log, datetime_from_ansi, datetime_from_iso, param_list_to_url_string
# function to save writing out WCS namespace in full each time
def ns(tag):
return "{http://www.opengis.net/ows/2.0}" + tag
def nsWCS2(tag):
return "{http://www.opengis.net/wcs/2.0}" + tag
class WebCoverageService_2_0_1(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 2.0.1
Implements IWebCoverageService.
"""
def __getitem__(self, name):
""" check contents dictionary to allow dict like access to service layers"""
if name in list(self.__getattribute__("contents").keys()):
return self.__getattribute__("contents")[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None):
super(WebCoverageService_2_0_1, self).__init__(auth=auth)
self.version = "2.0.1"
self.url = url
self.cookies = cookies
self.ows_common = OwsCommon(version="2.0.1")
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
# check for exceptions
se = self._capabilities.find("ServiceException")
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# serviceIdentification metadata
subelem = self._capabilities.find(ns("ServiceIdentification"))
self.identification = ServiceIdentification(
subelem, namespace=self.ows_common.namespace
)
# serviceProvider metadata
serviceproviderelem = self._capabilities.find(ns("ServiceProvider"))
self.provider = ServiceProvider(
serviceproviderelem, namespace=self.ows_common.namespace
)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns("OperationsMetadata"))[:]:
if elem.tag != ns("ExtendedCapabilities"):
self.operations.append(
OperationsMetadata(elem, namespace=self.ows_common.namespace)
)
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(
nsWCS2("Contents/") + nsWCS2("CoverageSummary")
):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [
f.text for f in self._capabilities.findall("Capability/Exception/Format")
]
def items(self):
"""supports dict-like items() access"""
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def __makeString(self, value):
# using repr unconditionally breaks things in some circumstances if a value is already a string
if type(value) is not str:
sval = repr(value)
else:
sval = value
return sval
def getCoverage(
self,
identifier=None,
bbox=None,
time=None,
format=None,
subsets=None,
resolutions=None,
sizes=None,
crs=None,
width=None,
height=None,
resx=None,
resy=None,
resz=None,
parameter=None,
method="Get",
**kwargs
):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs help with the multi-version implementation;
core keyword arguments should be supported across versions
example:
cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
format='cf-netcdf')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
example 2.0.1 URL
http://earthserver.pml.ac.uk/rasdaman/ows?&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage
&COVERAGEID=V2_monthly_CCI_chlor_a_insitu_test&SUBSET=Lat(40,50)&SUBSET=Long(-10,0)&SUBSET=ansi(144883,145000)&FORMAT=application/netcdf
cvg=wcs.getCoverage(identifier=['myID'], format='application/netcdf', subsets=[('axisName',min,max),
('axisName',min,max),('axisName',min,max)])
"""
if log.isEnabledFor(logging.DEBUG):
log.debug(
"WCS 2.0.1 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s, parameter=%s, method=%s, other_arguments=%s" # noqa
% (
identifier,
bbox,
time,
format,
crs,
width,
height,
resx,
resy,
resz,
parameter,
method,
str(kwargs),
)
)
try:
base_url = next(
(
m.get("url")
for m in self.getOperationByName("GetCoverage").methods
if m.get("type").lower() == method.lower()
)
)
except StopIteration:
base_url = self.url
log.debug("WCS 2.0.1 DEBUG: base url of server: %s" % base_url)
request = {"version": self.version, "request": "GetCoverage", "service": "WCS"}
assert len(identifier) > 0
request["CoverageID"] = identifier[0]
if crs:
request["crs"] = crs
request["format"] = format
if width:
request["width"] = width
if height:
request["height"] = height
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
if subsets:
data += param_list_to_url_string(subsets, 'subset')
if resolutions:
log.debug('Adding vendor-specific RESOLUTION parameter.')
data += param_list_to_url_string(resolutions, 'resolution')
if sizes:
log.debug('Adding vendor-specific SIZE parameter.')
data += param_list_to_url_string(sizes, 'size')
log.debug("WCS 2.0.1 DEBUG: Second part of URL: %s" % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth)
return u
def is_number(self, s):
"""simple helper to test if value is number as requests with numbers dont
need quote marks
"""
try:
float(s)
return True
except ValueError:
return False
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
self._elem = elem
self._service = service
self.id = elem.find(nsWCS2("CoverageId")).text
self.title = testXMLValue(elem.find(ns("label")))
self.abstract = testXMLValue(elem.find(ns("description")))
self.keywords = [
f.text for f in elem.findall(ns("keywords") + "/" + ns("keyword"))
]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns("lonLatEnvelope"))
if b is not None:
gmlpositions = b.findall("{http://www.opengis.net/gml}pos")
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]),
float(lc.split()[1]),
float(uc.split()[0]),
float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
grid = ReferenceableGridByVectors(gridelem)
else:
# HERE I LOOK FOR RECTIFIEDGRID
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.2}RectifiedGrid" # noqa
)
grid = RectifiedGrid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints. WCS servers can declare one
# or both or neither of these.
# in wcs 2.0 this can be gathered from the Envelope tag
def _getTimeLimits(self):
# timepoints, timelimits=[],[]
# b=self._elem.find(ns('lonLatEnvelope'))
# if b is not None:
# timepoints=b.findall('{http://www.opengis.net/gml}timePosition')
# else:
# #have to make a describeCoverage request...
# if not hasattr(self, 'descCov'):
# self.descCov=self._service.getDescribeCoverage(self.id)
# for pos in self.descCov.findall(
# ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
# timepoints.append(pos)
# if timepoints:
# timelimits=[timepoints[0].text,timepoints[1].text]
return [self.timepositions[0], self.timepositions[-1]]
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
# irregular time axis
coefficients = []
grid_axes = gridelem.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis"
)
for elem in grid_axes:
if elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}gridAxesSpanned" # noqa
).text in ["ansi", "unix"]:
coefficients = elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}coefficients" # noqa
).text.split(" ")
for x in coefficients:
x = x.replace('"', "")
t_date = datetime_from_iso(x)
timepositions.append(t_date)
else:
# regular time
if len(self.grid.origin) > 2:
t_grid = self.grid
t_date = t_grid.origin[2]
start_pos = parser.parse(t_date, fuzzy=True)
step = float(t_grid.offsetvectors[2][2])
start_pos = start_pos + timedelta(days=(step / 2))
no_steps = int(t_grid.highlimits[2])
for x in range(no_steps):
t_pos = start_pos + timedelta(days=(step * x))
# t_date = datetime_from_ansi(t_pos)
# t_date = t_pos.isoformat()
timepositions.append(t_pos)
else:
# no time axis
timepositions = None
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
""" incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod."""
bboxes = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}boundedBy/" + "{http://www.opengis.net/gml/3.2}Envelope" # noqa
):
bbox = {}
bbox["nativeSrs"] = envelope.attrib["srsName"]
lc = envelope.find("{http://www.opengis.net/gml/3.2}lowerCorner")
lc = lc.text.split()
uc = envelope.find("{http://www.opengis.net/gml/3.2}upperCorner")
uc = uc.text.split()
bbox["bbox"] = (float(lc[0]), float(lc[1]), float(uc[0]), float(uc[1]))
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("responseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("requestResponseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("nativeCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service._capabilities.findall(
nsWCS2("ServiceMetadata/") + nsWCS2("formatSupported")
):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("rangeSet/") + ns("RangeSet/") + ns("axisDescription/") + ns("AxisDescription")
):
axisDescs.append(
AxisDescription(elem)
) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide
# access to the information in the GML.
class Grid(object):
""" Simple grid class to provide axis and value information for a gml grid """
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get("dimension"))
self.lowlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}low" # noqa
).text.split(" ")
self.highlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}high" # noqa
).text.split(" ")
for axis in grid.findall("{http://www.opengis.net/gml/3.2}axisLabels")[
0
].text.split(" "):
self.axislabels.append(axis)
class RectifiedGrid(Grid):
""" RectifiedGrid class, extends Grid with additional offset vector information """
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
"{http://www.opengis.net/gml/3.2}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall(
"{http://www.opengis.net/gml/3.2}offsetVector"
):
self.offsetvectors.append(offset.text.split())
class ReferenceableGridByVectors(Grid):
""" ReferenceableGridByVectors class, extends Grid with additional vector information """
def __init__(self, referenceablegridbyvectors):
super(ReferenceableGridByVectors, self).__init__(referenceablegridbyvectors)
self.origin = referenceablegridbyvectors.find(
"{http://www.opengis.net/gml/3.3/rgrid}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in referenceablegridbyvectors.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis/{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}offsetVector" # noqa
):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
""" Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels"""
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
for elem in axisdescElem.getchildren():
if elem.tag == ns("name"):
self.name = elem.text
elif elem.tag == ns("label"):
self.label = elem.text
elif elem.tag == ns("values"):
for child in elem.getchildren():
self.values.append(child.text)
|
|
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/IDLTests.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import unittest
import sys
import os
import os.path
import TestCmd
import TestUnit
import SCons.Scanner.IDL
import SCons.Node.FS
import SCons.Warnings
test = TestCmd.TestCmd(workdir = '')
os.chdir(test.workpath(''))
# create some source files and headers:
test.write('t1.idl','''
#include "f1.idl"
#include <f2.idl>
import "f3.idl";
[
object,
uuid(22995106-CE26-4561-AF1B-C71C6934B840),
dual,
helpstring("IBarObject Interface"),
pointer_default(unique)
]
interface IBarObject : IDispatch
{
};
''')
test.write('t2.idl',"""
#include \"d1/f1.idl\"
#include <d2/f1.idl>
#include \"f1.idl\"
import <f3.idl>;
[
object,
uuid(22995106-CE26-4561-AF1B-C71C6934B840),
dual,
helpstring(\"IBarObject Interface\"),
pointer_default(unique)
]
interface IBarObject : IDispatch
{
};
""")
test.write('t3.idl',"""
#include \t \"f1.idl\"
\t #include \"f2.idl\"
# \t include \"f3-test.idl\"
#include \t <d1/f1.idl>
\t #include <d1/f2.idl>
# \t include <d1/f3-test.idl>
import \t \"d1/f1.idl\"
\t import \"d1/f2.idl\"
include \t \"never.idl\"
\t include \"never.idl\"
// #include \"never.idl\"
const char* x = \"#include <never.idl>\"
[
object,
uuid(22995106-CE26-4561-AF1B-C71C6934B840),
dual,
helpstring(\"IBarObject Interface\"),
pointer_default(unique)
]
interface IBarObject : IDispatch
{
};
""")
test.subdir('d1', ['d1', 'd2'])
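# t3.idl above exercises whitespace variants of the #include/import directives. Note that the
# bare `include "never.idl"` lines, the //-commented directive and the directive embedded in a
# string literal are all expected to be ignored: 'never.idl' does not appear in the expected
# dependencies of IDLScannerTestCase5 below.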
headers = ['f1.idl','f2.idl', 'f3.idl', 'f3-test.idl', 'fi.idl', 'fj.idl', 'never.idl',
'd1/f1.idl', 'd1/f2.idl', 'd1/f3-test.idl', 'd1/fi.idl', 'd1/fj.idl',
'd1/d2/f1.idl', 'd1/d2/f2.idl', 'd1/d2/f3-test.idl',
'd1/d2/f4.idl', 'd1/d2/fi.idl', 'd1/d2/fj.idl']
for h in headers:
test.write(h, " ")
test.write('f2.idl',"""
#include "fi.idl"
""")
test.write('f3-test.idl',"""
#include <fj.idl>
""")
test.subdir('include', 'subdir', ['subdir', 'include'])
test.write('t4.idl',"""
#include \"fa.idl\"
#include <fb.idl>
[
object,
uuid(22995106-CE26-4561-AF1B-C71C6934B840),
dual,
helpstring(\"IBarObject Interface\"),
pointer_default(unique)
]
interface IBarObject : IDispatch
{
};
""")
test.write(['include', 'fa.idl'], "\n")
test.write(['include', 'fb.idl'], "\n")
test.write(['subdir', 'include', 'fa.idl'], "\n")
test.write(['subdir', 'include', 'fb.idl'], "\n")
test.subdir('repository', ['repository', 'include'],
['repository', 'src' ])
test.subdir('work', ['work', 'src'])
test.write(['repository', 'include', 'iii.idl'], "\n")
test.write(['work', 'src', 'fff.c'], """
#include <iii.idl>
#include <jjj.idl>
int main()
{
return 0;
}
""")
test.write([ 'work', 'src', 'aaa.c'], """
#include "bbb.idl"
int main()
{
return 0;
}
""")
test.write([ 'work', 'src', 'bbb.idl'], "\n")
test.write([ 'repository', 'src', 'ccc.c'], """
#include "ddd.idl"
int main()
{
return 0;
}
""")
test.write([ 'repository', 'src', 'ddd.idl'], "\n")
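# The work/ and repository/ trees created above feed IDLScannerTestCase10 and
# IDLScannerTestCase11, which exercise scanning across Repository() and VariantDir() setups.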
# define some helpers:
class DummyEnvironment(object):
def __init__(self, listCppPath):
self.path = listCppPath
self.fs = SCons.Node.FS.FS(test.workpath(''))
def Dictionary(self, *args):
if not args:
return { 'CPPPATH': self.path }
elif len(args) == 1 and args[0] == 'CPPPATH':
return self.path
else:
raise KeyError("Dummy environment only has CPPPATH attribute.")
def subst(self, arg, target=None, source=None, conv=None):
return arg
def subst_path(self, path, target=None, source=None, conv=None):
if not isinstance(path, list):
path = [path]
return list(map(self.subst, path))
def has_key(self, key):
return key in self.Dictionary()
def __getitem__(self,key):
return self.Dictionary()[key]
def __setitem__(self,key,value):
self.Dictionary()[key] = value
def __delitem__(self,key):
del self.Dictionary()[key]
def get_calculator(self):
return None
def get_factory(self, factory):
return factory or self.fs.File
def Dir(self, filename):
return self.fs.Dir(filename)
def File(self, filename):
return self.fs.File(filename)
global my_normpath
my_normpath = os.path.normpath
if os.path.normcase('foo') == os.path.normcase('FOO'):
my_normpath = os.path.normcase
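# On case-insensitive filesystems (where 'foo' and 'FOO' normalize to the same name),
# normcase is used so the path comparisons in deps_match() below ignore case differences.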
def deps_match(self, deps, headers):
scanned = list(map(my_normpath, list(map(str, deps))))
expect = list(map(my_normpath, headers))
self.failUnless(scanned == expect, "expect %s != scanned %s" % (expect, scanned))
# define some tests:
class IDLScannerTestCase1(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t1.idl'), env, path)
headers = ['f1.idl', 'f3.idl', 'f2.idl']
deps_match(self, deps, headers)
class IDLScannerTestCase2(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([test.workpath("d1")])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t1.idl'), env, path)
headers = ['f1.idl', 'f3.idl', 'd1/f2.idl']
deps_match(self, deps, headers)
class IDLScannerTestCase3(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([test.workpath("d1")])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t2.idl'), env, path)
headers = ['d1/f1.idl', 'f1.idl', 'd1/d2/f1.idl', 'f3.idl']
deps_match(self, deps, headers)
class IDLScannerTestCase4(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([test.workpath("d1"), test.workpath("d1/d2")])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t2.idl'), env, path)
headers = ['d1/f1.idl', 'f1.idl', 'd1/d2/f1.idl', 'f3.idl']
deps_match(self, deps, headers)
class IDLScannerTestCase5(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
n = env.File('t3.idl')
def my_rexists(s):
s.Tag('rexists_called', 1)
return SCons.Node._rexists_map[s.GetTag('old_rexists')](s)
n.Tag('old_rexists', n._func_rexists)
SCons.Node._rexists_map[3] = my_rexists
n._func_rexists = 3
deps = s(n, env, path)
# Make sure rexists() got called on the file node being
# scanned, essential for cooperation with VariantDir functionality.
assert n.GetTag('rexists_called')
headers = ['d1/f1.idl', 'd1/f2.idl',
'f1.idl', 'f2.idl', 'f3-test.idl',
'd1/f1.idl', 'd1/f2.idl', 'd1/f3-test.idl']
deps_match(self, deps, headers)
class IDLScannerTestCase6(unittest.TestCase):
def runTest(self):
env1 = DummyEnvironment([test.workpath("d1")])
env2 = DummyEnvironment([test.workpath("d1/d2")])
s = SCons.Scanner.IDL.IDLScan()
path1 = s.path(env1)
path2 = s.path(env2)
deps1 = s(env1.File('t1.idl'), env1, path1)
deps2 = s(env2.File('t1.idl'), env2, path2)
headers1 = ['f1.idl', 'f3.idl', 'd1/f2.idl']
headers2 = ['f1.idl', 'f3.idl', 'd1/d2/f2.idl']
deps_match(self, deps1, headers1)
deps_match(self, deps2, headers2)
class IDLScannerTestCase7(unittest.TestCase):
def runTest(self):
env = DummyEnvironment(["include"])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps1 = s(env.File('t4.idl'), env, path)
env.fs.chdir(env.Dir('subdir'))
dir = env.fs.getcwd()
env.fs.chdir(env.Dir(''))
path = s.path(env, dir)
deps2 = s(env.File('#t4.idl'), env, path)
headers1 = list(map(test.workpath, ['include/fa.idl', 'include/fb.idl']))
headers2 = ['include/fa.idl', 'include/fb.idl']
deps_match(self, deps1, headers1)
deps_match(self, deps2, headers2)
class IDLScannerTestCase8(unittest.TestCase):
def runTest(self):
SCons.Warnings.enableWarningClass(SCons.Warnings.DependencyWarning)
class TestOut(object):
def __call__(self, x):
self.out = x
to = TestOut()
to.out = None
SCons.Warnings._warningOut = to
test.write('fa.idl','\n')
env = DummyEnvironment([])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t4.idl'), env, path)
# Did we catch the warning associated with not finding fb.idl?
assert to.out
deps_match(self, deps, [ 'fa.idl' ])
test.unlink('fa.idl')
class IDLScannerTestCase9(unittest.TestCase):
def runTest(self):
env = DummyEnvironment([])
env.fs.chdir(env.Dir('include'))
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
test.write('include/t4.idl', test.read('t4.idl'))
deps = s(env.File('#include/t4.idl'), env, path)
env.fs.chdir(env.Dir(''))
deps_match(self, deps, [ 'fa.idl', 'fb.idl' ])
test.unlink('include/t4.idl')
class IDLScannerTestCase10(unittest.TestCase):
def runTest(self):
os.chdir(test.workpath('work'))
fs = SCons.Node.FS.FS(test.workpath('work'))
fs.Repository(test.workpath('repository'))
# Create a derived file in a directory that does not exist yet.
# This was a bug at one time.
env = DummyEnvironment(['include', 'include2'])
env.fs = fs
f1 = fs.File('include2/jjj.idl')
f1.builder = 1
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(fs.File('src/fff.c'), env, path)
deps_match(self, deps, [ test.workpath('repository/include/iii.idl'),
'include2/jjj.idl' ])
os.chdir(test.workpath(''))
class IDLScannerTestCase11(unittest.TestCase):
def runTest(self):
os.chdir(test.workpath('work'))
fs = SCons.Node.FS.FS(test.workpath('work'))
fs.VariantDir('build1', 'src', 1)
fs.VariantDir('build2', 'src', 0)
fs.Repository(test.workpath('repository'))
env = DummyEnvironment([])
env.fs = fs
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps1 = s(fs.File('build1/aaa.c'), env, path)
deps_match(self, deps1, [ 'build1/bbb.idl' ])
deps2 = s(fs.File('build2/aaa.c'), env, path)
deps_match(self, deps2, [ 'src/bbb.idl' ])
deps3 = s(fs.File('build1/ccc.c'), env, path)
deps_match(self, deps3, [ 'build1/ddd.idl' ])
deps4 = s(fs.File('build2/ccc.c'), env, path)
deps_match(self, deps4, [ test.workpath('repository/src/ddd.idl') ])
os.chdir(test.workpath(''))
class IDLScannerTestCase12(unittest.TestCase):
def runTest(self):
class SubstEnvironment(DummyEnvironment):
def subst(self, arg, target=None, source=None, conv=None, test=test):
if arg == "$blah":
return test.workpath("d1")
else:
return arg
env = SubstEnvironment(["$blah"])
s = SCons.Scanner.IDL.IDLScan()
path = s.path(env)
deps = s(env.File('t1.idl'), env, path)
headers = ['f1.idl', 'f3.idl', 'd1/f2.idl']
deps_match(self, deps, headers)
def suite():
suite = unittest.TestSuite()
suite.addTest(IDLScannerTestCase1())
suite.addTest(IDLScannerTestCase2())
suite.addTest(IDLScannerTestCase3())
suite.addTest(IDLScannerTestCase4())
suite.addTest(IDLScannerTestCase5())
suite.addTest(IDLScannerTestCase6())
suite.addTest(IDLScannerTestCase7())
suite.addTest(IDLScannerTestCase8())
suite.addTest(IDLScannerTestCase9())
suite.addTest(IDLScannerTestCase10())
suite.addTest(IDLScannerTestCase11())
suite.addTest(IDLScannerTestCase12())
return suite
if __name__ == "__main__":
TestUnit.run(suite())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import os
import sys
import unittest
from collections import UserDict
import SCons.Errors
from SCons.Subst import *
class DummyNode(object):
"""Simple node work-alike."""
def __init__(self, name):
self.name = os.path.normpath(name)
def __str__(self):
return self.name
def is_literal(self):
return 1
def rfile(self):
return self
def get_subst_proxy(self):
return self
class DummyEnv(object):
def __init__(self, dict={}):
self.dict = dict
def Dictionary(self, key = None):
if not key:
return self.dict
return self.dict[key]
def __getitem__(self, key):
return self.dict[key]
def get(self, key, default):
return self.dict.get(key, default)
def sig_dict(self):
dict = self.dict.copy()
dict["TARGETS"] = 'tsig'
dict["SOURCES"] = 'ssig'
return dict
def cs(target=None, source=None, env=None, for_signature=None):
return 'cs'
def cl(target=None, source=None, env=None, for_signature=None):
return ['cl']
def CmdGen1(target, source, env, for_signature):
# Nifty trick...since Environment references are interpolated,
# instantiate an instance of a callable class with this one,
# which will then get evaluated.
assert str(target) == 't', target
assert str(source) == 's', source
return "${CMDGEN2('foo', %d)}" % for_signature
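# The string returned by CmdGen1 re-enters substitution: "${CMDGEN2('foo', ...)}" is evaluated
# against the environment, constructing a CmdGen2 instance, which is itself callable and is then
# invoked as a command generator. A single expansion therefore exercises two levels of callables.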
class CmdGen2(object):
def __init__(self, mystr, forsig):
self.mystr = mystr
self.expect_for_signature = forsig
def __call__(self, target, source, env, for_signature):
assert str(target) == 't', target
assert str(source) == 's', source
assert for_signature == self.expect_for_signature, for_signature
return [ self.mystr, env.Dictionary('BAR') ]
if os.sep == '/':
def cvt(str):
return str
else:
def cvt(str):
return str.replace('/', os.sep)
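# cvt() normalizes the '/'-separated expected strings in the test data to the host path
# separator, so the same cases can be compared on both POSIX and Windows.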
class SubstTestCase(unittest.TestCase):
class MyNode(DummyNode):
"""Simple node work-alike with some extra stuff for testing."""
def __init__(self, name):
DummyNode.__init__(self, name)
class Attribute(object):
pass
self.attribute = Attribute()
self.attribute.attr1 = 'attr$1-' + os.path.basename(name)
self.attribute.attr2 = 'attr$2-' + os.path.basename(name)
def get_stuff(self, extra):
return self.name + extra
foo = 1
class TestLiteral(object):
def __init__(self, literal):
self.literal = literal
def __str__(self):
return self.literal
def is_literal(self):
return 1
class TestCallable(object):
def __init__(self, value):
self.value = value
def __call__(self):
pass
def __str__(self):
return self.value
def function_foo(arg):
pass
target = [ MyNode("./foo/bar.exe"),
MyNode("/bar/baz with spaces.obj"),
MyNode("../foo/baz.obj") ]
source = [ MyNode("./foo/blah with spaces.cpp"),
MyNode("/bar/ack.cpp"),
MyNode("../foo/ack.c") ]
callable_object_1 = TestCallable('callable-1')
callable_object_2 = TestCallable('callable-2')
def _defines(defs):
l = []
for d in defs:
if SCons.Util.is_List(d) or isinstance(d, tuple):
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d))
return l
loc = {
'xxx' : None,
'NEWLINE' : 'before\nafter',
'null' : '',
'zero' : 0,
'one' : 1,
'BAZ' : 'baz',
'ONE' : '$TWO',
'TWO' : '$THREE',
'THREE' : 'four',
'AAA' : 'a',
'BBB' : 'b',
'CCC' : 'c',
'DO' : DummyNode('do something'),
'FOO' : DummyNode('foo.in'),
'BAR' : DummyNode('bar with spaces.out'),
'CRAZY' : DummyNode('crazy\nfile.in'),
# $XXX$HHH should expand to GGGIII, not BADNEWS.
'XXX' : '$FFF',
'FFF' : 'GGG',
'HHH' : 'III',
'FFFIII' : 'BADNEWS',
'LITERAL' : TestLiteral("$XXX"),
# Test that we can expand to and return a function.
#'FUNCTION' : function_foo,
'CMDGEN1' : CmdGen1,
'CMDGEN2' : CmdGen2,
'LITERALS' : [ Literal('foo\nwith\nnewlines'),
Literal('bar\nwith\nnewlines') ],
'NOTHING' : "",
'NONE' : None,
# Test various combinations of strings, lists and functions.
'N' : None,
'X' : 'x',
'Y' : '$X',
'R' : '$R',
'S' : 'x y',
'LS' : ['x y'],
'L' : ['x', 'y'],
'TS' : ('x y'),
'T' : ('x', 'y'),
'CS' : cs,
'CL' : cl,
'US' : collections.UserString('us'),
# Test function calls within ${}.
'FUNCCALL' : '${FUNC1("$AAA $FUNC2 $BBB")}',
'FUNC1' : lambda x: x,
'FUNC2' : lambda target, source, env, for_signature: ['x$CCC'],
# Various tests refactored from ActionTests.py.
'LIST' : [["This", "is", "$(", "$a", "$)", "test"]],
# Test recursion.
'RECURSE' : 'foo $RECURSE bar',
'RRR' : 'foo $SSS bar',
'SSS' : '$RRR',
# Test callables that don't match the calling arguments.
'CALLABLE1' : callable_object_1,
'CALLABLE2' : callable_object_2,
'_defines' : _defines,
'DEFS' : [ ('Q1', '"q1"'), ('Q2', '"$AAA"') ],
}
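# basic_comparisons() walks a subclass's flat basic_cases list two entries at a time
# (input, expected result), applies the given substitution function with the target/source
# nodes above, and reports every mismatch before asserting that none failed.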
def basic_comparisons(self, function, convert):
env = DummyEnv(self.loc)
cases = self.basic_cases[:]
kwargs = {'target' : self.target, 'source' : self.source,
'gvars' : env.Dictionary()}
failed = 0
while cases:
input, expect = cases[:2]
expect = convert(expect)
try:
result = function(input, env, **kwargs)
except Exception, e:
fmt = " input %s generated %s (%s)"
print fmt % (repr(input), e.__class__.__name__, repr(e))
failed = failed + 1
else:
if result != expect:
if failed == 0: print
print " input %s => %s did not match %s" % (repr(input), repr(result), repr(expect))
failed = failed + 1
del cases[:2]
fmt = "%d %s() cases failed"
assert failed == 0, fmt % (failed, function.__name__)
class scons_subst_TestCase(SubstTestCase):
# Basic tests of substitution functionality.
basic_cases = [
# Basics: strings without expansions are left alone, and
# the simplest possible expansion to a null-string value.
"test", "test",
"$null", "",
# Test expansion of integer values.
"test $zero", "test 0",
"test $one", "test 1",
# Test multiple re-expansion of values.
"test $ONE", "test four",
# Test a whole bunch of $TARGET[S] and $SOURCE[S] expansions.
"test $TARGETS $SOURCES",
"test foo/bar.exe /bar/baz with spaces.obj ../foo/baz.obj foo/blah with spaces.cpp /bar/ack.cpp ../foo/ack.c",
"test ${TARGETS[:]} ${SOURCES[0]}",
"test foo/bar.exe /bar/baz with spaces.obj ../foo/baz.obj foo/blah with spaces.cpp",
"test ${TARGETS[1:]}v",
"test /bar/baz with spaces.obj ../foo/baz.objv",
"test $TARGET",
"test foo/bar.exe",
"test $TARGET$NO_SUCH_VAR[0]",
"test foo/bar.exe[0]",
"test $TARGETS.foo",
"test 1 1 1",
"test ${SOURCES[0:2].foo}",
"test 1 1",
"test $SOURCE.foo",
"test 1",
"test ${TARGET.get_stuff('blah')}",
"test foo/bar.exeblah",
"test ${SOURCES.get_stuff('blah')}",
"test foo/blah with spaces.cppblah /bar/ack.cppblah ../foo/ack.cblah",
"test ${SOURCES[0:2].get_stuff('blah')}",
"test foo/blah with spaces.cppblah /bar/ack.cppblah",
"test ${SOURCES[0:2].get_stuff('blah')}",
"test foo/blah with spaces.cppblah /bar/ack.cppblah",
"test ${SOURCES.attribute.attr1}",
"test attr$1-blah with spaces.cpp attr$1-ack.cpp attr$1-ack.c",
"test ${SOURCES.attribute.attr2}",
"test attr$2-blah with spaces.cpp attr$2-ack.cpp attr$2-ack.c",
# Test adjacent expansions.
"foo$BAZ",
"foobaz",
"foo${BAZ}",
"foobaz",
# Test that adjacent expansions don't get re-interpreted
# together. The correct disambiguated expansion should be:
# $XXX$HHH => ${FFF}III => GGGIII
# not:
# $XXX$HHH => ${FFFIII} => BADNEWS
"$XXX$HHH", "GGGIII",
# Test double-dollar-sign behavior.
"$$FFF$HHH", "$FFFIII",
# Test that a Literal will stop dollar-sign substitution.
"$XXX $LITERAL $FFF", "GGG $XXX GGG",
# Test that we don't blow up even if they subscript
# something in ways they "can't."
"${FFF[0]}", "G",
"${FFF[7]}", "",
"${NOTHING[1]}", "",
# Test various combinations of strings and lists.
#None, '',
'', '',
'x', 'x',
'x y', 'x y',
'$N', '',
'$X', 'x',
'$Y', 'x',
'$R', '',
'$S', 'x y',
'$LS', 'x y',
'$L', 'x y',
'$TS', 'x y',
'$T', 'x y',
'$S z', 'x y z',
'$LS z', 'x y z',
'$L z', 'x y z',
'$TS z', 'x y z',
'$T z', 'x y z',
#cs, 'cs',
#cl, 'cl',
'$CS', 'cs',
'$CL', 'cl',
# Various uses of UserString.
collections.UserString('x'), 'x',
collections.UserString('$X'), 'x',
collections.UserString('$US'), 'us',
'$US', 'us',
# Test function calls within ${}.
'$FUNCCALL', 'a xc b',
# Bug reported by Christoph Wiedemann.
cvt('$xxx/bin'), '/bin',
# Tests callables that don't match our calling arguments.
'$CALLABLE1', 'callable-1',
# Test handling of quotes.
'aaa "bbb ccc" ddd', 'aaa "bbb ccc" ddd',
]
def test_scons_subst(self):
"""Test scons_subst(): basic substitution"""
return self.basic_comparisons(scons_subst, cvt)
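# Each group of four entries below is (input, expected SUBST_RAW result, expected SUBST_CMD
# result, expected SUBST_SIG result), consumed four at a time by test_subst_SUBST_modes().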
subst_cases = [
"test $xxx",
"test ",
"test",
"test",
"test $($xxx$)",
"test $($)",
"test",
"test",
"test $( $xxx $)",
"test $( $)",
"test",
"test",
"$AAA ${AAA}A $BBBB $BBB",
"a aA b",
"a aA b",
"a aA b",
"$RECURSE",
"foo bar",
"foo bar",
"foo bar",
"$RRR",
"foo bar",
"foo bar",
"foo bar",
# Verify what happens with no target or source nodes.
"$TARGET $SOURCES",
" ",
"",
"",
"$TARGETS $SOURCE",
" ",
"",
"",
# Various tests refactored from ActionTests.py.
"${LIST}",
"This is $( $) test",
"This is test",
"This is test",
["|", "$(", "$AAA", "|", "$BBB", "$)", "|", "$CCC", 1],
["|", "$(", "a", "|", "b", "$)", "|", "c", "1"],
["|", "a", "|", "b", "|", "c", "1"],
["|", "|", "c", "1"],
]
def test_subst_env(self):
"""Test scons_subst(): expansion dictionary"""
# The expansion dictionary no longer comes from the construction
# environment automatically.
env = DummyEnv(self.loc)
s = scons_subst('$AAA', env)
assert s == '', s
def test_subst_SUBST_modes(self):
"""Test scons_subst(): SUBST_* modes"""
env = DummyEnv(self.loc)
subst_cases = self.subst_cases[:]
gvars = env.Dictionary()
failed = 0
while subst_cases:
input, eraw, ecmd, esig = subst_cases[:4]
result = scons_subst(input, env, mode=SUBST_RAW, gvars=gvars)
if result != eraw:
if failed == 0: print
print " input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw))
failed = failed + 1
result = scons_subst(input, env, mode=SUBST_CMD, gvars=gvars)
if result != ecmd:
if failed == 0: print
print " input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd))
failed = failed + 1
result = scons_subst(input, env, mode=SUBST_SIG, gvars=gvars)
if result != esig:
if failed == 0: print
print " input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig))
failed = failed + 1
del subst_cases[:4]
assert failed == 0, "%d subst() mode cases failed" % failed
def test_subst_target_source(self):
"""Test scons_subst(): target= and source= arguments"""
env = DummyEnv(self.loc)
t1 = self.MyNode('t1')
t2 = self.MyNode('t2')
s1 = self.MyNode('s1')
s2 = self.MyNode('s2')
result = scons_subst("$TARGET $SOURCES", env,
target=[t1, t2],
source=[s1, s2])
assert result == "t1 s1 s2", result
result = scons_subst("$TARGET $SOURCES", env,
target=[t1, t2],
source=[s1, s2],
gvars={})
assert result == "t1 s1 s2", result
result = scons_subst("$TARGET $SOURCES", env, target=[], source=[])
assert result == " ", result
result = scons_subst("$TARGETS $SOURCE", env, target=[], source=[])
assert result == " ", result
def test_subst_callable_expansion(self):
"""Test scons_subst(): expanding a callable"""
env = DummyEnv(self.loc)
gvars = env.Dictionary()
newcom = scons_subst("test $CMDGEN1 $SOURCES $TARGETS", env,
target=self.MyNode('t'), source=self.MyNode('s'),
gvars=gvars)
assert newcom == "test foo bar with spaces.out s t", newcom
def test_subst_attribute_errors(self):
"""Test scons_subst(): handling attribute errors"""
env = DummyEnv(self.loc)
try:
class Foo(object):
pass
scons_subst('${foo.bar}', env, gvars={'foo':Foo()})
except SCons.Errors.UserError, e:
expect = [
"AttributeError `bar' trying to evaluate `${foo.bar}'",
"AttributeError `Foo instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
"AttributeError `'Foo' instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
"AttributeError `'Foo' object has no attribute 'bar'' trying to evaluate `${foo.bar}'",
]
assert str(e) in expect, e
else:
raise AssertionError("did not catch expected UserError")
def test_subst_syntax_errors(self):
"""Test scons_subst(): handling syntax errors"""
env = DummyEnv(self.loc)
try:
scons_subst('$foo.bar.3.0', env)
except SCons.Errors.UserError, e:
expect = [
# Python 2.3, 2.4
"SyntaxError `invalid syntax (line 1)' trying to evaluate `$foo.bar.3.0'",
# Python 2.5
"SyntaxError `invalid syntax (<string>, line 1)' trying to evaluate `$foo.bar.3.0'",
]
assert str(e) in expect, e
else:
raise AssertionError("did not catch expected UserError")
def test_subst_type_errors(self):
"""Test scons_subst(): handling type errors"""
env = DummyEnv(self.loc)
try:
scons_subst("${NONE[2]}", env, gvars={'NONE':None})
except SCons.Errors.UserError, e:
expect = [
# Python 2.3, 2.4
"TypeError `unsubscriptable object' trying to evaluate `${NONE[2]}'",
# Python 2.5, 2.6
"TypeError `'NoneType' object is unsubscriptable' trying to evaluate `${NONE[2]}'",
# Python 2.7 and later
"TypeError `'NoneType' object is not subscriptable' trying to evaluate `${NONE[2]}'",
# Python 2.7 and later under Fedora
"TypeError `'NoneType' object has no attribute '__getitem__'' trying to evaluate `${NONE[2]}'",
]
assert str(e) in expect, e
else:
raise AssertionError("did not catch expected UserError")
try:
def func(a, b, c):
pass
scons_subst("${func(1)}", env, gvars={'func':func})
except SCons.Errors.UserError, e:
expect = [
# Python 2.3, 2.4, 2.5
"TypeError `func() takes exactly 3 arguments (1 given)' trying to evaluate `${func(1)}'"
]
assert str(e) in expect, repr(str(e))
else:
raise AssertionError("did not catch expected UserError")
def test_subst_raw_function(self):
"""Test scons_subst(): fetch function with SUBST_RAW plus conv"""
# Test that the combination of SUBST_RAW plus a pass-through
# conversion routine allows us to fetch a function through the
# dictionary. CommandAction uses this to allow delayed evaluation
# of $SPAWN variables.
env = DummyEnv(self.loc)
gvars = env.Dictionary()
x = lambda x: x
r = scons_subst("$CALLABLE1", env, mode=SUBST_RAW, conv=x, gvars=gvars)
assert r is self.callable_object_1, repr(r)
r = scons_subst("$CALLABLE1", env, mode=SUBST_RAW, gvars=gvars)
assert r == 'callable-1', repr(r)
# Test how we handle overriding the internal conversion routines.
def s(obj):
return obj
n1 = self.MyNode('n1')
env = DummyEnv({'NODE' : n1})
gvars = env.Dictionary()
node = scons_subst("$NODE", env, mode=SUBST_RAW, conv=s, gvars=gvars)
assert node is n1, node
node = scons_subst("$NODE", env, mode=SUBST_CMD, conv=s, gvars=gvars)
assert node is n1, node
node = scons_subst("$NODE", env, mode=SUBST_SIG, conv=s, gvars=gvars)
assert node is n1, node
#def test_subst_function_return(self):
# """Test scons_subst(): returning a function"""
# env = DummyEnv({'FUNCTION' : foo})
# gvars = env.Dictionary()
# func = scons_subst("$FUNCTION", env, mode=SUBST_RAW, call=None, gvars=gvars)
# assert func is function_foo, func
# func = scons_subst("$FUNCTION", env, mode=SUBST_CMD, call=None, gvars=gvars)
# assert func is function_foo, func
# func = scons_subst("$FUNCTION", env, mode=SUBST_SIG, call=None, gvars=gvars)
# assert func is function_foo, func
def test_subst_overriding_gvars(self):
"""Test scons_subst(): supplying an overriding gvars dictionary"""
env = DummyEnv({'XXX' : 'xxx'})
result = scons_subst('$XXX', env, gvars=env.Dictionary())
assert result == 'xxx', result
result = scons_subst('$XXX', env, gvars={'XXX' : 'yyy'})
assert result == 'yyy', result
class CLVar_TestCase(unittest.TestCase):
def test_CLVar(self):
"""Test scons_subst() and scons_subst_list() with CLVar objects"""
loc = {}
loc['FOO'] = 'foo'
loc['BAR'] = SCons.Util.CLVar('bar')
loc['CALL'] = lambda target, source, env, for_signature: 'call'
env = DummyEnv(loc)
cmd = SCons.Util.CLVar("test $FOO $BAR $CALL test")
newcmd = scons_subst(cmd, env, gvars=env.Dictionary())
assert newcmd == ['test', 'foo', 'bar', 'call', 'test'], newcmd
cmd_list = scons_subst_list(cmd, env, gvars=env.Dictionary())
assert len(cmd_list) == 1, cmd_list
assert cmd_list[0][0] == "test", cmd_list[0][0]
assert cmd_list[0][1] == "foo", cmd_list[0][1]
assert cmd_list[0][2] == "bar", cmd_list[0][2]
assert cmd_list[0][3] == "call", cmd_list[0][3]
assert cmd_list[0][4] == "test", cmd_list[0][4]
class scons_subst_list_TestCase(SubstTestCase):
basic_cases = [
"$TARGETS",
[
["foo/bar.exe", "/bar/baz with spaces.obj", "../foo/baz.obj"],
],
"$SOURCES $NEWLINE $TARGETS",
[
["foo/blah with spaces.cpp", "/bar/ack.cpp", "../foo/ack.c", "before"],
["after", "foo/bar.exe", "/bar/baz with spaces.obj", "../foo/baz.obj"],
],
"$SOURCES$NEWLINE",
[
["foo/blah with spaces.cpp", "/bar/ack.cpp", "../foo/ack.cbefore"],
["after"],
],
"foo$FFF",
[
["fooGGG"],
],
"foo${FFF}",
[
["fooGGG"],
],
"test ${SOURCES.attribute.attr1}",
[
["test", "attr$1-blah with spaces.cpp", "attr$1-ack.cpp", "attr$1-ack.c"],
],
"test ${SOURCES.attribute.attr2}",
[
["test", "attr$2-blah with spaces.cpp", "attr$2-ack.cpp", "attr$2-ack.c"],
],
"$DO --in=$FOO --out=$BAR",
[
["do something", "--in=foo.in", "--out=bar with spaces.out"],
],
# This test is now fixed, and works like it should.
"$DO --in=$CRAZY --out=$BAR",
[
["do something", "--in=crazy\nfile.in", "--out=bar with spaces.out"],
],
# Try passing a list to scons_subst_list().
[ "$SOURCES$NEWLINE", "$TARGETS", "This is a test"],
[
["foo/blah with spaces.cpp", "/bar/ack.cpp", "../foo/ack.cbefore"],
["after", "foo/bar.exe", "/bar/baz with spaces.obj", "../foo/baz.obj", "This is a test"],
],
# Test against a former bug in scons_subst_list().
"$XXX$HHH",
[
["GGGIII"],
],
# Test double-dollar-sign behavior.
"$$FFF$HHH",
[
["$FFFIII"],
],
# Test various combinations of strings, lists and functions.
None, [[]],
[None], [[]],
'', [[]],
[''], [[]],
'x', [['x']],
['x'], [['x']],
'x y', [['x', 'y']],
['x y'], [['x y']],
['x', 'y'], [['x', 'y']],
'$N', [[]],
['$N'], [[]],
'$X', [['x']],
['$X'], [['x']],
'$Y', [['x']],
['$Y'], [['x']],
#'$R', [[]],
#['$R'], [[]],
'$S', [['x', 'y']],
'$S z', [['x', 'y', 'z']],
['$S'], [['x', 'y']],
['$S z'], [['x', 'y z']], # XXX - IS THIS BEST?
['$S', 'z'], [['x', 'y', 'z']],
'$LS', [['x y']],
'$LS z', [['x y', 'z']],
['$LS'], [['x y']],
['$LS z'], [['x y z']],
['$LS', 'z'], [['x y', 'z']],
'$L', [['x', 'y']],
'$L z', [['x', 'y', 'z']],
['$L'], [['x', 'y']],
['$L z'], [['x', 'y z']], # XXX - IS THIS BEST?
['$L', 'z'], [['x', 'y', 'z']],
cs, [['cs']],
[cs], [['cs']],
cl, [['cl']],
[cl], [['cl']],
'$CS', [['cs']],
['$CS'], [['cs']],
'$CL', [['cl']],
['$CL'], [['cl']],
# Various uses of UserString.
collections.UserString('x'), [['x']],
[collections.UserString('x')], [['x']],
collections.UserString('$X'), [['x']],
[collections.UserString('$X')], [['x']],
collections.UserString('$US'), [['us']],
[collections.UserString('$US')], [['us']],
'$US', [['us']],
['$US'], [['us']],
# Test function calls within ${}.
'$FUNCCALL', [['a', 'xc', 'b']],
# Test handling of newlines in white space.
'foo\nbar', [['foo'], ['bar']],
'foo\n\nbar', [['foo'], ['bar']],
'foo \n \n bar', [['foo'], ['bar']],
'foo \nmiddle\n bar', [['foo'], ['middle'], ['bar']],
# Bug reported by Christoph Wiedemann.
cvt('$xxx/bin'), [['/bin']],
# Test variables smooshed together with different prefixes.
'foo$AAA', [['fooa']],
'<$AAA', [['<', 'a']],
'>$AAA', [['>', 'a']],
'|$AAA', [['|', 'a']],
# Test callables that don't match our calling arguments.
'$CALLABLE2', [['callable-2']],
# Test handling of quotes.
# XXX Find a way to handle this in the future.
#'aaa "bbb ccc" ddd', [['aaa', 'bbb ccc', 'ddd']],
'${_defines(DEFS)}', [['Q1="q1"', 'Q2="a"']],
]
def test_scons_subst_list(self):
"""Test scons_subst_list(): basic substitution"""
def convert_lists(expect):
return [list(map(cvt, l)) for l in expect]
return self.basic_comparisons(scons_subst_list, convert_lists)
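# As above, each group of four entries is (input, expected SUBST_RAW, expected SUBST_CMD,
# expected SUBST_SIG), consumed four at a time by test_subst_SUBST_modes().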
subst_list_cases = [
"test $xxx",
[["test"]],
[["test"]],
[["test"]],
"test $($xxx$)",
[["test", "$($)"]],
[["test"]],
[["test"]],
"test $( $xxx $)",
[["test", "$(", "$)"]],
[["test"]],
[["test"]],
"$AAA ${AAA}A $BBBB $BBB",
[["a", "aA", "b"]],
[["a", "aA", "b"]],
[["a", "aA", "b"]],
"$RECURSE",
[["foo", "bar"]],
[["foo", "bar"]],
[["foo", "bar"]],
"$RRR",
[["foo", "bar"]],
[["foo", "bar"]],
[["foo", "bar"]],
# Verify what happens with no target or source nodes.
"$TARGET $SOURCES",
[[]],
[[]],
[[]],
"$TARGETS $SOURCE",
[[]],
[[]],
[[]],
# Various tests refactored from ActionTests.py
"${LIST}",
[['This', 'is', '$(', '$)', 'test']],
[['This', 'is', 'test']],
[['This', 'is', 'test']],
["|", "$(", "$AAA", "|", "$BBB", "$)", "|", "$CCC", 1],
[["|", "$(", "a", "|", "b", "$)", "|", "c", "1"]],
[["|", "a", "|", "b", "|", "c", "1"]],
[["|", "|", "c", "1"]],
]
def test_subst_env(self):
"""Test scons_subst_list(): expansion dictionary"""
# The expansion dictionary no longer comes from the construction
# environment automatically.
env = DummyEnv()
s = scons_subst_list('$AAA', env)
assert s == [[]], s
def test_subst_target_source(self):
"""Test scons_subst_list(): target= and source= arguments"""
env = DummyEnv(self.loc)
gvars = env.Dictionary()
t1 = self.MyNode('t1')
t2 = self.MyNode('t2')
s1 = self.MyNode('s1')
s2 = self.MyNode('s2')
result = scons_subst_list("$TARGET $SOURCES", env,
target=[t1, t2],
source=[s1, s2],
gvars=gvars)
assert result == [['t1', 's1', 's2']], result
result = scons_subst_list("$TARGET $SOURCES", env,
target=[t1, t2],
source=[s1, s2],
gvars={})
assert result == [['t1', 's1', 's2']], result
# Test interpolating a callable.
_t = DummyNode('t')
_s = DummyNode('s')
cmd_list = scons_subst_list("testing $CMDGEN1 $TARGETS $SOURCES",
env, target=_t, source=_s,
gvars=gvars)
assert cmd_list == [['testing', 'foo', 'bar with spaces.out', 't', 's']], cmd_list
def test_subst_escape(self):
"""Test scons_subst_list(): escape functionality"""
env = DummyEnv(self.loc)
gvars = env.Dictionary()
def escape_func(foo):
return '**' + foo + '**'
cmd_list = scons_subst_list("abc $LITERALS xyz", env, gvars=gvars)
assert cmd_list == [['abc',
'foo\nwith\nnewlines',
'bar\nwith\nnewlines',
'xyz']], cmd_list
c = cmd_list[0][0].escape(escape_func)
assert c == 'abc', c
c = cmd_list[0][1].escape(escape_func)
assert c == '**foo\nwith\nnewlines**', c
c = cmd_list[0][2].escape(escape_func)
assert c == '**bar\nwith\nnewlines**', c
c = cmd_list[0][3].escape(escape_func)
assert c == 'xyz', c
# We used to treat literals smooshed together like the whole
# thing was literal and escape it as a unit. The commented-out
# asserts below are in case we ever have to find a way to
# resurrect that functionality in some way.
cmd_list = scons_subst_list("abc${LITERALS}xyz", env, gvars=gvars)
c = cmd_list[0][0].escape(escape_func)
#assert c == '**abcfoo\nwith\nnewlines**', c
assert c == 'abcfoo\nwith\nnewlines', c
c = cmd_list[0][1].escape(escape_func)
#assert c == '**bar\nwith\nnewlinesxyz**', c
assert c == 'bar\nwith\nnewlinesxyz', c
_t = DummyNode('t')
cmd_list = scons_subst_list('echo "target: $TARGET"', env,
target=_t, gvars=gvars)
c = cmd_list[0][0].escape(escape_func)
assert c == 'echo', c
c = cmd_list[0][1].escape(escape_func)
assert c == '"target:', c
c = cmd_list[0][2].escape(escape_func)
assert c == 't"', c
def test_subst_SUBST_modes(self):
"""Test scons_subst_list(): SUBST_* modes"""
env = DummyEnv(self.loc)
subst_list_cases = self.subst_list_cases[:]
gvars = env.Dictionary()
r = scons_subst_list("$TARGET $SOURCES", env, mode=SUBST_RAW, gvars=gvars)
assert r == [[]], r
failed = 0
while subst_list_cases:
input, eraw, ecmd, esig = subst_list_cases[:4]
result = scons_subst_list(input, env, mode=SUBST_RAW, gvars=gvars)
if result != eraw:
if failed == 0: print
print " input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw))
failed = failed + 1
result = scons_subst_list(input, env, mode=SUBST_CMD, gvars=gvars)
if result != ecmd:
if failed == 0: print
print " input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd))
failed = failed + 1
result = scons_subst_list(input, env, mode=SUBST_SIG, gvars=gvars)
if result != esig:
if failed == 0: print
print " input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig))
failed = failed + 1
del subst_list_cases[:4]
assert failed == 0, "%d subst() mode cases failed" % failed
def test_subst_attribute_errors(self):
"""Test scons_subst_list(): handling attribute errors"""
env = DummyEnv()
try:
class Foo(object):
pass
scons_subst_list('${foo.bar}', env, gvars={'foo':Foo()})
except SCons.Errors.UserError, e:
expect = [
"AttributeError `bar' trying to evaluate `${foo.bar}'",
"AttributeError `Foo instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
"AttributeError `'Foo' instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
"AttributeError `'Foo' object has no attribute 'bar'' trying to evaluate `${foo.bar}'",
]
assert str(e) in expect, e
else:
raise AssertionError("did not catch expected UserError")
def test_subst_syntax_errors(self):
"""Test scons_subst_list(): handling syntax errors"""
env = DummyEnv()
try:
scons_subst_list('$foo.bar.3.0', env)
except SCons.Errors.UserError, e:
expect = [
"SyntaxError `invalid syntax' trying to evaluate `$foo.bar.3.0'",
"SyntaxError `invalid syntax (line 1)' trying to evaluate `$foo.bar.3.0'",
"SyntaxError `invalid syntax (<string>, line 1)' trying to evaluate `$foo.bar.3.0'",
]
assert str(e) in expect, e
else:
raise AssertionError("did not catch expected UserError")
def test_subst_raw_function(self):
"""Test scons_subst_list(): fetch function with SUBST_RAW plus conv"""
# Test that the combination of SUBST_RAW plus a pass-through
# conversion routine allows us to fetch a function through the
# dictionary.
env = DummyEnv(self.loc)
gvars = env.Dictionary()
x = lambda x: x
r = scons_subst_list("$CALLABLE2", env, mode=SUBST_RAW, conv=x, gvars=gvars)
assert r == [[self.callable_object_2]], repr(r)
r = scons_subst_list("$CALLABLE2", env, mode=SUBST_RAW, gvars=gvars)
assert r == [['callable-2']], repr(r)
def test_subst_list_overriding_conv(self):
"""Test scons_subst_list(): overriding conv()"""
env = DummyEnv()
def s(obj):
return obj
n1 = self.MyNode('n1')
env = DummyEnv({'NODE' : n1})
gvars=env.Dictionary()
node = scons_subst_list("$NODE", env, mode=SUBST_RAW, conv=s, gvars=gvars)
assert node == [[n1]], node
node = scons_subst_list("$NODE", env, mode=SUBST_CMD, conv=s, gvars=gvars)
assert node == [[n1]], node
node = scons_subst_list("$NODE", env, mode=SUBST_SIG, conv=s, gvars=gvars)
assert node == [[n1]], node
def test_subst_list_overriding_gvars(self):
"""Test scons_subst_list(): supplying an overriding gvars dictionary"""
env = DummyEnv({'XXX' : 'xxx'})
result = scons_subst_list('$XXX', env, gvars=env.Dictionary())
assert result == [['xxx']], result
result = scons_subst_list('$XXX', env, gvars={'XXX' : 'yyy'})
assert result == [['yyy']], result
class scons_subst_once_TestCase(unittest.TestCase):
loc = {
'CCFLAGS' : '-DFOO',
'ONE' : 1,
'RECURSE' : 'r $RECURSE r',
'LIST' : ['a', 'b', 'c'],
}
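# Each group of three entries below is (input, key, expected result), consumed three at a
# time by test_subst_once().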
basic_cases = [
'$CCFLAGS -DBAR',
'OTHER_KEY',
'$CCFLAGS -DBAR',
'$CCFLAGS -DBAR',
'CCFLAGS',
'-DFOO -DBAR',
'x $ONE y',
'ONE',
'x 1 y',
'x $RECURSE y',
'RECURSE',
'x r $RECURSE r y',
'$LIST',
'LIST',
'a b c',
['$LIST'],
'LIST',
['a', 'b', 'c'],
['x', '$LIST', 'y'],
'LIST',
['x', 'a', 'b', 'c', 'y'],
['x', 'x $LIST y', 'y'],
'LIST',
['x', 'x a b c y', 'y'],
['x', 'x $CCFLAGS y', 'y'],
'LIST',
['x', 'x $CCFLAGS y', 'y'],
['x', 'x $RECURSE y', 'y'],
'LIST',
['x', 'x $RECURSE y', 'y'],
]
def test_subst_once(self):
"""Test the scons_subst_once() function"""
env = DummyEnv(self.loc)
cases = self.basic_cases[:]
failed = 0
while cases:
input, key, expect = cases[:3]
result = scons_subst_once(input, env, key)
if result != expect:
if failed == 0: print
print " input %s (%s) => %s did not match %s" % (repr(input), repr(key), repr(result), repr(expect))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d subst() cases failed" % failed
class quote_spaces_TestCase(unittest.TestCase):
def test_quote_spaces(self):
"""Test the quote_spaces() method..."""
q = quote_spaces('x')
assert q == 'x', q
q = quote_spaces('x x')
assert q == '"x x"', q
q = quote_spaces('x\tx')
assert q == '"x\tx"', q
class Node(object):
def __init__(self, name, children=[]):
self.children = children
self.name = name
def __str__(self):
return self.name
def exists(self):
return 1
def rexists(self):
return 1
def has_builder(self):
return 1
def has_explicit_builder(self):
return 1
def side_effect(self):
return 1
def precious(self):
return 1
def always_build(self):
return 1
def current(self):
return 1
class LiteralTestCase(unittest.TestCase):
def test_Literal(self):
"""Test the Literal() function."""
input_list = [ '$FOO', Literal('$BAR') ]
gvars = { 'FOO' : 'BAZ', 'BAR' : 'BLAT' }
def escape_func(cmd):
return '**' + cmd + '**'
cmd_list = scons_subst_list(input_list, None, gvars=gvars)
cmd_list = escape_list(cmd_list[0], escape_func)
assert cmd_list == ['BAZ', '**$BAR**'], cmd_list
class SpecialAttrWrapperTestCase(unittest.TestCase):
def test_SpecialAttrWrapper(self):
"""Test the SpecialAttrWrapper() function."""
input_list = [ '$FOO', SpecialAttrWrapper('$BAR', 'BLEH') ]
gvars = { 'FOO' : 'BAZ', 'BAR' : 'BLAT' }
def escape_func(cmd):
return '**' + cmd + '**'
cmd_list = scons_subst_list(input_list, None, gvars=gvars)
cmd_list = escape_list(cmd_list[0], escape_func)
assert cmd_list == ['BAZ', '**$BAR**'], cmd_list
cmd_list = scons_subst_list(input_list, None, mode=SUBST_SIG, gvars=gvars)
cmd_list = escape_list(cmd_list[0], escape_func)
assert cmd_list == ['BAZ', '**BLEH**'], cmd_list
class subst_dict_TestCase(unittest.TestCase):
def test_subst_dict(self):
"""Test substituting dictionary values in an Action
"""
t = DummyNode('t')
s = DummyNode('s')
d = subst_dict(target=t, source=s)
assert str(d['TARGETS'][0]) == 't', d['TARGETS']
assert str(d['TARGET']) == 't', d['TARGET']
assert str(d['SOURCES'][0]) == 's', d['SOURCES']
assert str(d['SOURCE']) == 's', d['SOURCE']
t1 = DummyNode('t1')
t2 = DummyNode('t2')
s1 = DummyNode('s1')
s2 = DummyNode('s2')
d = subst_dict(target=[t1, t2], source=[s1, s2])
TARGETS = sorted([str(x) for x in d['TARGETS']])
assert TARGETS == ['t1', 't2'], d['TARGETS']
assert str(d['TARGET']) == 't1', d['TARGET']
SOURCES = sorted([str(x) for x in d['SOURCES']])
assert SOURCES == ['s1', 's2'], d['SOURCES']
assert str(d['SOURCE']) == 's1', d['SOURCE']
class V(object):
# Fake Value node with no rfile() method.
def __init__(self, name):
self.name = name
def __str__(self):
return 'v-'+self.name
def get_subst_proxy(self):
return self
class N(V):
def rfile(self):
return self.__class__('rstr-' + self.name)
t3 = N('t3')
t4 = DummyNode('t4')
t5 = V('t5')
s3 = DummyNode('s3')
s4 = N('s4')
s5 = V('s5')
d = subst_dict(target=[t3, t4, t5], source=[s3, s4, s5])
TARGETS = sorted([str(x) for x in d['TARGETS']])
assert TARGETS == ['t4', 'v-t3', 'v-t5'], TARGETS
SOURCES = sorted([str(x) for x in d['SOURCES']])
assert SOURCES == ['s3', 'v-rstr-s4', 'v-s5'], SOURCES
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [
CLVar_TestCase,
LiteralTestCase,
SpecialAttrWrapperTestCase,
quote_spaces_TestCase,
scons_subst_TestCase,
scons_subst_list_TestCase,
scons_subst_once_TestCase,
subst_dict_TestCase,
]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
"""Unit tests for ClearCaseClient."""
from __future__ import unicode_literals
import os
import unittest
import kgb
import six
from rbtools.clients.clearcase import ClearCaseClient, ClearCaseRepositoryInfo
from rbtools.clients.errors import SCMError
from rbtools.clients.tests import SCMClientTestCase
from rbtools.utils.checks import check_gnu_diff
from rbtools.utils.filesystem import is_exe_in_path
from rbtools.utils.process import execute
_SNAPSHOT_VIEW_INFO = [
' snapshot-view-test',
'/home/user/stgloc_view1/user/snapshot-view-test.vws',
'Created 2021-06-18T18:10:32-04:00 by user.user@localhost.localdomain',
('Last modified 2021-06-24T17:01:39-04:00 by '
'user.user@localhost.localdomain'),
('Last accessed 2021-06-24T17:01:39-04:00 by '
'user.user@localhost.localdomain'),
('Last read of private data 2021-06-24T17:01:39-04:00 by '
'user.user@localhost.localdomain'),
('Last config spec update 2021-06-24T15:40:22-04:00 by '
'user.user@localhost.localdomain'),
('Last view private object update 2021-06-24T17:01:39-04:00 by '
'user.user@localhost.localdomain'),
'Text mode: unix',
'Properties: snapshot readwrite',
'Owner: user : rwx (all)',
'Group: user : rwx (all)',
'Other: : r-x (read)',
'Additional groups: wheel',
]
_DYNAMIC_VIEW_INFO = [
'* test-view /viewstore/test-view.vbs',
'Created 2021-06-10T01:49:46-04:00 by user.user@localhost.localdomain',
('Last modified 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last accessed 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last config spec update 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
'Text mode: unix',
'Properties: dynamic readwrite shareable_dos',
'Owner: user : rwx (all)',
'Group: user : rwx (all)',
'Other: : r-x (read)',
'Additional groups: wheel',
]
_UCM_VIEW_INFO = [
'* development-view /viewstore/development-view.vbs',
'Created 2021-06-10T01:49:46-04:00 by user.user@localhost.localdomain',
('Last modified 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last accessed 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last config spec update 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
'Text mode: unix',
'Properties: dynamic ucmview readwrite shareable_dos',
'Owner: user : rwx (all)',
'Group: user : rwx (all)',
'Other: : r-x (read)',
'Additional groups: wheel',
]
_AUTOMATIC_VIEW_INFO = [
'* development-view /viewstore/development-view.vbs',
'Created 2021-06-10T01:49:46-04:00 by user.user@localhost.localdomain',
('Last modified 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last accessed 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last config spec update 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
'Text mode: unix',
'Properties: automatic readwrite shareable_dos',
'Owner: user : rwx (all)',
'Group: user : rwx (all)',
'Other: : r-x (read)',
'Additional groups: wheel',
]
_WEBVIEW_VIEW_INFO = [
'* development-view /viewstore/development-view.vbs',
'Created 2021-06-10T01:49:46-04:00 by user.user@localhost.localdomain',
('Last modified 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last accessed 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
('Last config spec update 2021-06-10T01:49:46-04:00 by '
'user.user@localhost.localdomain'),
'Text mode: unix',
'Properties: webview readwrite shareable_dos',
'Owner: user : rwx (all)',
'Group: user : rwx (all)',
'Other: : r-x (read)',
'Additional groups: wheel',
]
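# The *_VIEW_INFO lists above mimic the line-by-line output of
# `cleartool lsview -full -properties -cview`; the tests below key off the 'Properties:'
# line to decide the view type (snapshot, dynamic, ucmview, automatic or webview).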
class ClearCaseClientTests(kgb.SpyAgency, SCMClientTestCase):
"""Unit tests for ClearCaseClient."""
@unittest.skipIf(not is_exe_in_path('cleartool'),
'cleartool not found in path')
def setUp(self):
super(ClearCaseClientTests, self).setUp()
self.set_user_home(
os.path.join(self.testdata_dir, 'homedir'))
self.client = ClearCaseClient(options=self.options)
def test_get_local_path_outside_view(self):
"""Testing ClearCaseClient.get_local_path outside of view"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('** NONE **'),
},
]))
self.assertEqual(self.client.get_local_path(), None)
def test_get_local_path_inside_view(self):
"""Testing ClearCaseClient.get_local_path inside view"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
]))
self.assertEqual(self.client.get_local_path(), '/test/view/vob')
def test_get_repository_info_snapshot(self):
"""Testing ClearCaseClient.get_repository_info with snapshot view"""
self.spy_on(check_gnu_diff, call_original=False)
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lsview', '-full', '-properties',
'-cview'],),
'op': kgb.SpyOpReturn(_SNAPSHOT_VIEW_INFO),
},
]))
repository_info = self.client.get_repository_info()
self.assertEqual(repository_info.path, '/test/view/vob')
self.assertEqual(repository_info.vobtag, 'vob')
self.assertEqual(repository_info.vob_tags, {'vob'})
# Initial state that gets populated later by update_from_remote
self.assertEqual(repository_info.uuid_to_tags, {})
self.assertEqual(repository_info.is_legacy, True)
self.assertEqual(self.client.viewtype, 'snapshot')
self.assertEqual(self.client.is_ucm, False)
def test_get_repository_info_dynamic(self):
"""Testing ClearCaseClient.get_repository_info with dynamic view and
base ClearCase
"""
self.spy_on(check_gnu_diff, call_original=False)
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lsview', '-full', '-properties',
'-cview'],),
'op': kgb.SpyOpReturn(_DYNAMIC_VIEW_INFO),
},
]))
repository_info = self.client.get_repository_info()
self.assertEqual(repository_info.path, '/test/view/vob')
self.assertEqual(repository_info.vobtag, 'vob')
self.assertEqual(repository_info.vob_tags, {'vob'})
# Initial state that gets populated later by update_from_remote
self.assertEqual(repository_info.uuid_to_tags, {})
self.assertEqual(repository_info.is_legacy, True)
self.assertEqual(self.client.viewtype, 'dynamic')
self.assertEqual(self.client.is_ucm, False)
def test_get_repository_info_dynamic_UCM(self):
"""Testing ClearCaseClient.get_repository_info with dynamic view and UCM
"""
self.spy_on(check_gnu_diff, call_original=False)
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lsview', '-full', '-properties',
'-cview'],),
'op': kgb.SpyOpReturn(_UCM_VIEW_INFO),
},
]))
repository_info = self.client.get_repository_info()
self.assertEqual(repository_info.path, '/test/view/vob')
self.assertEqual(repository_info.vobtag, 'vob')
self.assertEqual(repository_info.vob_tags, {'vob'})
# Initial state that gets populated later by update_from_remote
self.assertEqual(repository_info.uuid_to_tags, {})
self.assertEqual(repository_info.is_legacy, True)
self.assertEqual(self.client.viewtype, 'dynamic')
self.assertEqual(self.client.is_ucm, True)
def test_get_repository_info_automatic(self):
"""Testing ClearCaseClient.get_repository_info with automatic view"""
self.spy_on(check_gnu_diff, call_original=False)
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lsview', '-full', '-properties',
'-cview'],),
'op': kgb.SpyOpReturn(_AUTOMATIC_VIEW_INFO),
},
]))
try:
self.client.get_repository_info()
except SCMError as e:
self.assertEqual(six.text_type(e),
'Webviews and automatic views are not currently '
'supported. RBTools commands can only be used in '
'dynamic or snapshot views.')
else:
self.fail('get_repository_info did not raise SCMError')
def test_get_repository_info_webview(self):
"""Testing ClearCaseClient.get_repository_info with webview"""
self.spy_on(check_gnu_diff, call_original=False)
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'pwv', '-short'],),
'op': kgb.SpyOpReturn('test-view'),
},
{
'args': (['cleartool', 'pwv', '-root'],),
'op': kgb.SpyOpReturn('/test/view'),
},
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lsview', '-full', '-properties',
'-cview'],),
'op': kgb.SpyOpReturn(_WEBVIEW_VIEW_INFO),
},
]))
try:
self.client.get_repository_info()
except SCMError as e:
self.assertEqual(six.text_type(e),
'Webviews and automatic views are not currently '
'supported. RBTools commands can only be used in '
'dynamic or snapshot views.')
else:
self.fail('get_repository_info did not raise SCMError')
def test_repository_info_update_from_remote_clearcase(self):
"""Testing ClearCaseRepositoryInfo.update_from_remote with ClearCase
remote
"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'lsregion'],),
'op': kgb.SpyOpReturn(['region']),
},
{
'args': (['cleartool', 'lsvob', '-s', '-family',
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
'-region', 'region'],),
'op': kgb.SpyOpReturn('vob1'),
},
{
'args': (['cleartool', 'lsvob', '-s', '-family',
'b520a815.c9af11eb.986f.52:54:00:7f:63:a5',
'-region', 'region'],),
'op': kgb.SpyOpReturn('vob2'),
},
]))
repository_info = ClearCaseRepositoryInfo('/view/test/vob', 'vob')
repository_info.update_from_remote({}, {
'repopath': '/view/server-view',
'uuids': [
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
'b520a815.c9af11eb.986f.52:54:00:7f:63:a5',
],
})
self.assertEqual(repository_info.uuid_to_tags, {
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5': ['vob1'],
'b520a815.c9af11eb.986f.52:54:00:7f:63:a5': ['vob2'],
})
self.assertEqual(repository_info.is_legacy, False)
def test_repository_info_update_from_remote_versionvault(self):
"""Testing ClearCaseRepositoryInfo.update_from_remote with
VersionVault remote
"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'lsregion'],),
'op': kgb.SpyOpReturn(['region']),
},
{
'args': (['cleartool', 'lsvob', '-s', '-family',
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
'-region', 'region'],),
'op': kgb.SpyOpReturn('vob'),
},
]))
repository_info = ClearCaseRepositoryInfo('/view/test/vob', 'vob')
repository_info.update_from_remote({}, {
'repopath': '/view/server-view',
'uuid': '9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
})
self.assertEqual(repository_info.uuid_to_tags, {
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5': ['vob'],
})
self.assertEqual(repository_info.is_legacy, True)
def test_get_vobtag_success(self):
"""Testing ClearCaseClient._get_vobtag inside view"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn('/vob\n'),
},
]))
self.assertEqual(self.client._get_vobtag(), '/vob')
def test_get_vobtag_error(self):
"""Testing ClearCaseClient._get_vobtag outside view"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'describe', '-short', 'vob:.'],),
'op': kgb.SpyOpReturn(
'cleartool: Error: Unable to determine VOB for '
'pathname ".".\n'
),
},
]))
with self.assertRaises(SCMError):
self.client._get_vobtag()
def test_parse_revision_spec(self):
"""Testing ClearCaseClient.parse_revision_spec"""
cases = [
(
[],
'--rbtools-checkedout-base',
'--rbtools-checkedout-changeset',
),
(
['activity:bugfix123'],
'--rbtools-activity-base',
'bugfix123',
),
(
['baseline:test@/vob'],
'--rbtools-baseline-base',
['test@/vob'],
),
(
['brtype:bugfix123'],
'--rbtools-branch-base',
'bugfix123',
),
(
['lbtype:bugfix123'],
'--rbtools-label-base',
['bugfix123'],
),
(
['stream:bugfix123@/vob'],
'--rbtools-stream-base',
'bugfix123@/vob',
),
(
['baseline:dev@/vob', 'baseline:bugfix123@/vob'],
'--rbtools-baseline-base',
['dev@/vob', 'bugfix123@/vob'],
),
(
['lbtype:dev', 'lbtype:bugfix123'],
'--rbtools-label-base',
['dev', 'bugfix123'],
),
(
[
'vob1/file@@/main/0:vob1/file@@/main/4',
'vob2/file2@@/dev/3:vob2/file2@@/main/9',
],
'--rbtools-files',
[
['vob1/file@@/main/0', 'vob1/file@@/main/4'],
['vob2/file2@@/dev/3', 'vob2/file2@@/main/9'],
],
),
]
# Fake a dynamic view, which is required for revision specs with two
# revisions.
self.client.viewtype = 'dynamic'
for spec, base, tip in cases:
self.assertEqual(
self.client.parse_revision_spec(spec),
{'base': base, 'tip': tip})
def test_checkedout_changeset(self):
"""Testing ClearCaseClient._get_checkedout_changeset"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'lsregion'],),
'op': kgb.SpyOpReturn(['region']),
},
{
'args': (['cleartool', 'lsvob', '-s', '-family',
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
'-region', 'region'],),
'op': kgb.SpyOpReturn('vob'),
},
{
'args': (['cleartool', 'lscheckout', '-avobs', '-cview',
'-me', '-fmt', r'%En\t%PVn\t%Vn\n'],),
'op': kgb.SpyOpReturn(
'test2.py\t/main/1\t/main/CHECKEDOUT\n'
'test.pdf\t/main/0\t/main/CHECKEDOUT\n'
'test.py\t/main/1\t/main/CHECKEDOUT\n'
),
},
]))
repository_info = ClearCaseRepositoryInfo('/view/test/vob', 'vob')
repository_info.update_from_remote({}, {
'repopath': '/view/server-view',
'uuid': '9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
})
changeset = self.client._get_checkedout_changeset(repository_info)
self.assertEqual(changeset, [
('test2.py@@/main/1', 'test2.py'),
('test.pdf@@/main/0', 'test.pdf'),
('test.py@@/main/1', 'test.py'),
])
def test_activity_changeset(self):
"""Testing ClearCaseClient._get_activity_changeset"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'lsregion'],),
'op': kgb.SpyOpReturn(['region']),
},
{
'args': (['cleartool', 'lsvob', '-s', '-family',
'9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
'-region', 'region'],),
'op': kgb.SpyOpReturn('/vobs/els'),
},
{
'args': (['cleartool', 'lsactivity', '-fmt', '%[versions]Qp',
'activity-name'],),
'op': kgb.SpyOpReturn(
'"/view/x/vobs/els/.@@/main/int/CHECKEDOUT.78" '
'"/view/x/vobs/els/test.pdf@@/main/int/CHECKEDOUT.77" '
'"/view/x/vobs/els/new.py@@/main/int/CHECKEDOUT.71" '
'"/view/x/vobs/els/test.py@@/main/int/CHECKEDOUT.64" '
'"/view/x/vobs/els/.@@/main/int/2" '
'"/view/x/vobs/els/test.py@@/main/int/3" '
'"/view/x/vobs/els/test.py@@/main/int/2"'
),
},
{
'args': (['cleartool', 'desc', '-fmt',
'%[version_predecessor]p',
'/view/x/vobs/els/.@@/main/int/2'],),
'op': kgb.SpyOpReturn('/main/int/1'),
},
{
'args': (['cleartool', 'desc', '-fmt',
'%[version_predecessor]p',
'/view/x/vobs/els/test.py@@/main/int/2'],),
'op': kgb.SpyOpReturn('/main/int/1'),
},
]))
repository_info = ClearCaseRepositoryInfo('/view/test/vob', 'vob')
repository_info.update_from_remote({}, {
'repopath': '/view/server-view',
'uuid': '9ac6856f.c9af11eb.9851.52:54:00:7f:63:a5',
})
changeset = self.client._get_activity_changeset('activity-name',
repository_info)
self.assertEqual(changeset, [
('/view/x/vobs/els/.@@/main/int/1',
'/view/x/vobs/els/.'),
('/view/x/vobs/els/test.pdf@@/main/int/0',
'/view/x/vobs/els/test.pdf'),
('/view/x/vobs/els/new.py@@/main/int/0',
'/view/x/vobs/els/new.py'),
('/view/x/vobs/els/test.py@@/main/int/1',
'/view/x/vobs/els/test.py'),
])
def test_diff_directory(self):
"""Testing ClearCaseClient._diff_directory"""
self.spy_on(execute, op=kgb.SpyOpMatchInOrder([
{
'args': (['cleartool', 'diff', '-ser',
'.@@/main/1', '.@@/main/CHECKEDOUT'],),
'op': kgb.SpyOpReturn([
'********************************',
'<<< directory 1: .@@/main/test-project_integration/2',
'>>> directory 2:',
'.@@/main/test-project_integration/CHECKEDOUT',
'********************************',
'-----[ renamed to ]-----',
'< test2.py --06-29T17:26 david',
'---',
'> renamed-file.py --06-29T17:26 david',
'-----[ deleted ]-----',
'< test3.py --07-28T00:30 david',
'-----[ added ]-----',
'> test4.py --07-28T18:27 david',
]),
},
{
'args': (['cleartool', 'desc', '-fmt', '%On',
'.@@/main/1/test2.py'],),
'op': kgb.SpyOpReturn('test2.py-fake-oid'),
},
{
'args': (['cleartool', 'desc', '-fmt', '%On',
'.@@/main/CHECKEDOUT/renamed-file.py'],),
'op': kgb.SpyOpReturn('renamed-file.py-fake-oid'),
},
{
'args': (['cleartool', 'desc', '-fmt', '%On',
'.@@/main/1/test3.py'],),
'op': kgb.SpyOpReturn('test3.py-fake-oid'),
},
{
'args': (['cleartool', 'desc', '-fmt', '%On',
'.@@/main/CHECKEDOUT/test4.py'],),
'op': kgb.SpyOpReturn('test4.py-fake-oid'),
},
]))
self.assertEqual(
self.client._diff_directory('.@@/main/1', '.@@/main/CHECKEDOUT'),
{
'added': {('.@@/main/CHECKEDOUT/test4.py',
'test4.py-fake-oid')},
'deleted': {('.@@/main/1/test3.py', 'test3.py-fake-oid')},
'renamed': {('.@@/main/1/test2.py', 'test2.py-fake-oid',
'.@@/main/CHECKEDOUT/renamed-file.py',
'renamed-file.py-fake-oid')},
})
|
|
# coding: utf-8
# # **Word Count Lab: Building a word count application**
# #### This lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. In this lab, we will write code that calculates the most common words in the [Complete Works of William Shakespeare](http://www.gutenberg.org/ebooks/100) retrieved from [Project Gutenberg](http://www.gutenberg.org/wiki/Main_Page). This could also be scaled to find the most common words on the Internet.
# #### ** During this lab we will cover: **
# #### *Part 1:* Creating a base RDD and pair RDDs
# #### *Part 2:* Counting with pair RDDs
# #### *Part 3:* Finding unique words and a mean value
# #### *Part 4:* Apply word count to a file
# #### Note that, for reference, you can look up the details of the relevant methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD)
# ### ** Part 1: Creating a base RDD and pair RDDs **
# #### In this part of the lab, we will explore creating a base RDD with `parallelize` and using pair RDDs to count words.
# #### ** (1a) Create a base RDD **
# #### We'll start by generating a base RDD by using a Python list and the `sc.parallelize` method. Then we'll print out the type of the base RDD.
# In[1]:
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat']
wordsRDD = sc.parallelize(wordsList, 4)
# Print out the type of wordsRDD
print type(wordsRDD)
# #### ** (1b) Pluralize and test **
# #### Let's use a `map()` transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace `<FILL IN>` with your solution. If you have trouble, the next cell has the solution. After you have defined `makePlural` you can run the third cell which contains a test. If your implementation is correct it will print `1 test passed`.
# #### This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more `<FILL IN>` sections. The cell that needs to be modified will have `# TODO: Replace <FILL IN> with appropriate code` on its first line. Once the `<FILL IN>` sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests.
# In[2]:
def makePlural(word):
"""Adds an 's' to `word`.
Note:
This is a simple function that only adds an 's'. No attempt is made to follow proper
pluralization rules.
Args:
word (str): A string.
Returns:
str: A string with 's' added to it.
"""
return word + 's'
print makePlural('cat')
# In[3]:
# One way of completing the function
def makePlural(word):
return word + 's'
print makePlural('cat')
# In[4]:
# Load in the testing code and check to see if your answer is correct
# If incorrect it will report back '1 test failed' for each failed test
# Make sure to rerun any cell you change before trying the test again
from test_helper import Test
# TEST Pluralize and test (1b)
Test.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s')
# #### ** (1c) Apply `makePlural` to the base RDD **
# #### Now pass each item in the base RDD into a [map()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.map) transformation that applies the `makePlural()` function to each element. And then call the [collect()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collect) action to see the transformed RDD.
# In[5]:
pluralRDD = wordsRDD.map(makePlural)
print pluralRDD.collect()
# In[6]:
# TEST Apply makePlural to the base RDD(1c)
Test.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralRDD')
# #### ** (1d) Pass a `lambda` function to `map` **
# #### Let's create the same RDD using a `lambda` function.
# In[7]:
pluralLambdaRDD = wordsRDD.map(lambda w: w + 's')
print pluralLambdaRDD.collect()
# In[8]:
# TEST Pass a lambda function to map (1d)
Test.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralLambdaRDD (1d)')
# #### ** (1e) Length of each word **
# #### Now use `map()` and a `lambda` function to return the number of characters in each word. We'll `collect` this result directly into a variable.
# In[9]:
pluralLengths = (pluralRDD
.map(lambda w: len(w))
.collect())
print pluralLengths
# In[10]:
# TEST Length of each word (1e)
Test.assertEquals(pluralLengths, [4, 9, 4, 4, 4],
'incorrect values for pluralLengths')
# #### ** (1f) Pair RDDs **
# #### The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple `(k, v)` where `k` is the key and `v` is the value. In this example, we will create a pair consisting of `('<word>', 1)` for each word element in the RDD.
# #### We can create the pair RDD using the `map()` transformation with a `lambda()` function to create a new RDD.
# In[11]:
wordPairs = wordsRDD.map(lambda w: (w, 1))
print wordPairs.collect()
# In[12]:
# TEST Pair RDDs (1f)
Test.assertEquals(wordPairs.collect(),
[('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)],
'incorrect value for wordPairs')
# ### ** Part 2: Counting with pair RDDs **
# #### Now, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others.
# #### A naive approach would be to `collect()` all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations.
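# For illustration only (not one of the lab's graded cells): a sketch of the naive,
# driver-side version of the count that the paragraph above warns against. Every
# element is shipped back to the driver before any counting happens.
from collections import Counter
naiveDriverSideCounts = Counter(wordsRDD.collect())
print naiveDriverSideCounts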
# #### ** (2a) `groupByKey()` approach **
# #### An approach you might first consider (we'll see shortly that there are better ways) is based on using the [groupByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.groupByKey) transformation. As the name implies, the `groupByKey()` transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using `groupByKey()`:
# + #### The operation requires a lot of data movement to move all the values into the appropriate partitions.
# + #### The lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) would be huge and could exhaust the available memory in a worker.
#
# #### Use `groupByKey()` to generate a pair RDD of type `('word', iterator)`.
# In[13]:
wordsGrouped = wordPairs.groupByKey()
for key, value in wordsGrouped.collect():
print '{0}: {1}'.format(key, list(value))
# In[14]:
# TEST groupByKey() approach (2a)
Test.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()),
[('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])],
'incorrect value for wordsGrouped')
# #### ** (2b) Use `groupByKey()` to obtain the counts **
# #### Using the `groupByKey()` transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator.
# #### Now sum the iterator using a `map()` transformation. The result should be a pair RDD consisting of (word, count) pairs.
# In[31]:
import itertools
wordCountsGrouped = wordsGrouped.map(lambda (k, v): (k, sum(list(v))))
print wordCountsGrouped.collect()
# In[32]:
# TEST Use groupByKey() to obtain the counts (2b)
Test.assertEquals(sorted(wordCountsGrouped.collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsGrouped')
# #### ** (2c) Counting using `reduceByKey` **
# #### A better approach is to start from the pair RDD and then use the [reduceByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey) transformation to create a new pair RDD. The `reduceByKey()` transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. `reduceByKey()` operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets.
# In[42]:
wordCounts = wordPairs.reduceByKey(lambda a,b: a + b)
print wordCounts.collect()
# In[43]:
# TEST Counting using reduceByKey (2c)
Test.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCounts')
# #### ** (2d) All together **
# #### The expert version of the code performs the `map()` to pair RDD, `reduceByKey()` transformation, and `collect` in one statement.
# In[44]:
# TODO: Replace <FILL IN> with appropriate code
wordCountsCollected = (wordsRDD
.map(lambda w: (w, 1))
.reduceByKey(lambda a,b: a + b)
.collect())
print wordCountsCollected
# In[45]:
# TEST All together (2d)
Test.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsCollected')
# ### ** Part 3: Finding unique words and a mean value **
# #### ** (3a) Unique words **
# #### Calculate the number of unique words in `wordsRDD`. You can use other RDDs that you have already created to make this easier.
# In[68]:
# TODO: Replace <FILL IN> with appropriate code
uniqueWords = wordsRDD.distinct().count()
print uniqueWords
# In[69]:
# TEST Unique words (3a)
Test.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords')
# #### ** (3b) Mean using `reduce` **
# #### Find the mean number of words per unique word in `wordCounts`.
# #### Use a `reduce()` action to sum the counts in `wordCounts` and then divide by the number of unique words. First `map()` the pair RDD `wordCounts`, which consists of (key, value) pairs, to an RDD of values.
# In[70]:
from operator import add
totalCount = (wordCounts
.map(lambda (w, c): c)
.reduce(add))
average = totalCount / float(uniqueWords)
print totalCount
print round(average, 2)
# In[71]:
# TEST Mean using reduce (3b)
Test.assertEquals(round(average, 2), 1.67, 'incorrect value of average')
# ### ** Part 4: Apply word count to a file **
# #### In this section we will finish developing our word count application. We'll have to build the `wordCount` function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data.
# #### ** (4a) `wordCount` function **
# #### First, define a function for word counting. You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like `wordsRDD` and return a pair RDD that has all of the words and their associated counts.
# In[78]:
from operator import add
def wordCount(wordListRDD):
"""Creates a pair RDD with word counts from an RDD of words.
Args:
wordListRDD (RDD of str): An RDD consisting of words.
Returns:
RDD of (str, int): An RDD consisting of (word, count) tuples.
"""
return wordListRDD.map(lambda w: (w, 1)).reduceByKey(add)
print wordCount(wordsRDD).collect()
# In[79]:
# TEST wordCount function (4a)
Test.assertEquals(sorted(wordCount(wordsRDD).collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect definition for wordCount function')
# #### ** (4b) Capitalization and punctuation **
# #### Real world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are:
# + #### Words should be counted independently of their capitalization (e.g., Spark and spark should be counted as the same word).
# + #### All punctuation should be removed.
# + #### Any leading or trailing spaces on a line should be removed.
#
# #### Define the function `removePunctuation` that converts all text to lower case, removes any punctuation, and removes leading and trailing spaces. Use the Python [re](https://docs.python.org/2/library/re.html) module to remove any text that is not a letter, number, or space. Reading `help(re.sub)` might be useful.
# In[81]:
import re
def removePunctuation(text):
"""Removes punctuation, changes to lower case, and strips leading and trailing spaces.
Note:
        Only spaces, letters, and numbers should be retained. Other characters should be
eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after
punctuation is removed.
Args:
text (str): A string.
Returns:
str: The cleaned up string.
"""
pattern = re.compile("[^a-z0-9 ]")
return pattern.sub('', text.strip().lower())
print removePunctuation('Hi, you!')
print removePunctuation(' No under_score!')
# In[82]:
# TEST Capitalization and punctuation (4b)
Test.assertEquals(removePunctuation(" The Elephant's 4 cats. "),
'the elephants 4 cats',
'incorrect definition for removePunctuation function')
# #### ** (4c) Load a text file **
# #### For the next part of this lab, we will use the [Complete Works of William Shakespeare](http://www.gutenberg.org/ebooks/100) from [Project Gutenberg](http://www.gutenberg.org/wiki/Main_Page). To convert a text file into an RDD, we use the `SparkContext.textFile()` method. We also apply the recently defined `removePunctuation()` function using a `map()` transformation to strip out the punctuation and change all text to lowercase. Since the file is large we use `take(15)`, so that we only print 15 lines.
# In[83]:
# Just run this code
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
shakespeareRDD = (sc
.textFile(fileName, 8)
.map(removePunctuation))
print '\n'.join(shakespeareRDD
.zipWithIndex() # to (line, lineNum)
.map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'
.take(15))
# #### ** (4d) Words from lines **
# #### Before we can use the `wordcount()` function, we have to address two issues with the format of the RDD:
# + #### The first issue is that we need to split each line by its spaces.
# + #### The second issue is that we need to filter out empty lines.
#
# #### Apply a transformation that will split each element of the RDD by its spaces. For each element of the RDD, you should apply Python's string [split()](https://docs.python.org/2/library/string.html#string.split) function. You might think that a `map()` transformation is the way to do this, but think about what the result of the `split()` function will be.
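# For illustration only (not a graded cell): why a plain `map()` is not enough here.
# With `map()` every line becomes a *list* of words, so the result is an RDD of
# lists; `flatMap()` flattens those lists into a single RDD of words.
print sc.parallelize(['a b', 'c']).map(lambda l: l.split(' ')).collect()
print sc.parallelize(['a b', 'c']).flatMap(lambda l: l.split(' ')).collect()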
# In[86]:
shakespeareWordsRDD = shakespeareRDD.flatMap(lambda l: l.split(' '))
shakespeareWordCount = shakespeareWordsRDD.count()
print shakespeareWordsRDD.top(5)
print shakespeareWordCount
# In[87]:
# TEST Words from lines (4d)
# This test allows for leading spaces to be removed either before or after
# punctuation is removed.
Test.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908,
'incorrect value for shakespeareWordCount')
Test.assertEquals(shakespeareWordsRDD.top(5),
[u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'],
'incorrect value for shakespeareWordsRDD')
# #### ** (4e) Remove empty elements **
# #### The next step is to filter out the empty elements. Remove all entries where the word is `''`.
# In[89]:
shakeWordsRDD = shakespeareWordsRDD.filter(lambda w: w != '')
shakeWordCount = shakeWordsRDD.count()
print shakeWordCount
# In[90]:
# TEST Remove empty elements (4e)
Test.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount')
# #### ** (4f) Count the words **
# #### We now have an RDD that is only words. Next, let's apply the `wordCount()` function to produce a list of word counts. We can view the top 15 words by using the `takeOrdered()` action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair.
# #### You'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results.
# #### Use the `wordCount()` function and `takeOrdered()` to obtain the fifteen most common words and their counts.
# In[92]:
# TODO: Replace <FILL IN> with appropriate code
top15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15, lambda (w, c): c * -1)
print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts))
# In[93]:
# TEST Count the words (4f)
Test.assertEquals(top15WordsAndCounts,
[(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463),
(u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890),
(u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)],
'incorrect value for top15WordsAndCounts')
|
|
"""
Logging
-------
This module provides custom logging functionality for other amici modules
"""
import logging
import platform
import socket
import amici
import os
import warnings
import time
import functools
from inspect import getouterframes, currentframe
from typing import Optional, Callable, Union
LOG_LEVEL_ENV_VAR = 'AMICI_LOG'
BASE_LOGGER_NAME = 'amici'
# Supported values for LOG_LEVEL_ENV_VAR
NAMED_LOG_LEVELS = {'NOTSET': logging.NOTSET,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL}
def _setup_logger(level: Optional[int] = logging.WARNING,
console_output: Optional[bool] = True,
file_output: Optional[bool] = False,
capture_warnings: Optional[bool] = True) -> logging.Logger:
"""
Set up a new logging.Logger for AMICI logging
:param level:
Logging level, typically using a constant like logging.INFO or
logging.DEBUG
:param console_output:
Set up a default console log handler if True (default)
:param file_output:
Supply a filename to copy all log output to that file, or
set to False to disable (default)
:param capture_warnings:
Capture warnings from Python's warnings module if True (default)
:return:
A :class:`logging.Logger` object for AMICI logging. Note that other
AMICI modules
should use a logger specific to their namespace instead by calling
:func:`get_logger`.
"""
log = logging.getLogger(BASE_LOGGER_NAME)
# Logging level can be overridden with environment variable
if LOG_LEVEL_ENV_VAR in os.environ:
try:
level = int(os.environ[LOG_LEVEL_ENV_VAR])
except ValueError:
# Try parsing as a name
level_name = os.environ[LOG_LEVEL_ENV_VAR]
if level_name in NAMED_LOG_LEVELS.keys():
level = NAMED_LOG_LEVELS[level_name]
else:
raise ValueError(f'Environment variable {LOG_LEVEL_ENV_VAR} '
f'contains an invalid value "{level_name}".'
f' If set, its value must be one of '
f'{", ".join(NAMED_LOG_LEVELS.keys())}'
f' (case-sensitive) or an integer log level.')
log.setLevel(level)
# Remove default logging handler
log.handlers = []
log_fmt = logging.Formatter('%(asctime)s.%(msecs).3d - %(name)s - '
'%(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if console_output:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_fmt)
log.addHandler(stream_handler)
if file_output:
file_handler = logging.FileHandler(file_output)
file_handler.setFormatter(log_fmt)
log.addHandler(file_handler)
log.info('Logging started on AMICI version %s', amici.__version__)
log.debug('OS Platform: %s', platform.platform())
log.debug('Python version: %s', platform.python_version())
log.debug('Hostname: %s', socket.getfqdn())
logging.captureWarnings(capture_warnings)
return log
def set_log_level(logger: logging.Logger, log_level: Union[int, bool]) -> None:
    """Set the level of ``logger``: ``True`` evaluates to ``logging.DEBUG``, an
    integer is used directly, and ``None`` or ``False`` leaves it unchanged."""
if log_level is not None and log_level is not False:
if isinstance(log_level, bool):
log_level = logging.DEBUG
elif not isinstance(log_level, int):
raise ValueError('log_level must be a boolean, integer or None')
if logger.getEffectiveLevel() != log_level:
logger.debug('Changing log_level from %d to %d' % (
logger.getEffectiveLevel(), log_level))
logger.setLevel(log_level)
def get_logger(logger_name: Optional[str] = BASE_LOGGER_NAME,
log_level: Optional[int] = None,
**kwargs) -> logging.Logger:
"""
    Returns (if existing) or creates an AMICI logger
If the AMICI base logger has already been set up, this method will
return it or any of its descendant loggers without overriding the
settings - i.e. any values supplied as kwargs will be ignored.
:param logger_name:
Get a logger for a specific namespace, typically __name__
for code outside of classes or self.__module__ inside a class
:param log_level:
Override the default or preset log level for the requested logger.
None or False uses the default or preset value. True evaluates to
logging.DEBUG. Any integer is used directly.
:param console_output:
Set up a default console log handler if True (default). Only used when
the AMICI logger hasn't been set up yet.
:param file_output:
Supply a filename to copy all log output to that file, or set to
False to disable (default). Only used when the AMICI logger hasn't
been set up yet.
:param capture_warnings:
Capture warnings from Python's warnings module if True (default).
        Only used when the AMICI logger hasn't been set up yet.
:return:
A logging.Logger object with the requested name
"""
if BASE_LOGGER_NAME not in logging.Logger.manager.loggerDict.keys():
_setup_logger(**kwargs)
elif kwargs:
warnings.warn('AMICI logger already exists, ignoring keyword '
'arguments to setup_logger')
logger = logging.getLogger(logger_name)
set_log_level(logger, log_level)
return logger
def log_execution_time(description: str, logger: logging.Logger) -> Callable:
"""
Parameterized function decorator that enables automatic execution time
tracking
:param description:
Description of what the decorated function does
:param logger:
Logger to which execution timing will be printed
"""
def decorator_timer(func):
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
# append pluses to indicate recursion level
recursion_level = sum(
frame.function == 'wrapper_timer'
and frame.filename == __file__
for frame in getouterframes(currentframe(), context=0)
)
recursion = ''
if recursion_level > 1:
recursion = '+' * (recursion_level - 1)
tstart = time.perf_counter()
rval = func(*args, **kwargs)
tend = time.perf_counter()
spacers = ' ' * max(54 - len(description) - len(logger.name) -
len(recursion), 0)
logger.info(f'Finished {description}{spacers}'
f'{recursion} ({(tend - tstart):.2E}s)')
return rval
return wrapper_timer
return decorator_timer
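# A minimal usage sketch (illustrative, not part of the AMICI API): it shows how
# ``get_logger`` and ``log_execution_time`` are meant to be combined. The logger
# namespace and the workload below are made up for the example.
if __name__ == '__main__':
    _example_logger = get_logger('amici.example', logging.DEBUG)
    @log_execution_time('running the example workload', _example_logger)
    def _example_workload():
        # Stand-in for an expensive call whose runtime should be logged
        return sum(range(10000))
    _example_workload()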
|
|
from decimal import Decimal
from django.contrib.gis.db.models.fields import GeometryField, RasterField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import BooleanField, FloatField, IntegerField, TextField
from django.db.models.expressions import Func, Value
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
class GeoFunc(Func):
function = None
output_field_class = None
geom_param_pos = 0
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super(GeoFunc, self).__init__(*expressions, **extra)
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
expr = self.source_expressions[self.geom_param_pos]
if hasattr(expr, 'srid'):
return expr.srid
try:
return expr.field.srid
except (AttributeError, FieldError):
return None
@property
def geo_field(self):
return GeometryField(srid=self.srid) if self.srid else None
def as_sql(self, compiler, connection, **extra_context):
if self.function is None:
self.function = connection.ops.spatial_function_name(self.name)
if any(isinstance(field, RasterField) for field in self.get_source_fields()):
raise TypeError("Geometry functions not supported for raster fields.")
return super(GeoFunc, self).as_sql(compiler, connection, **extra_context)
def resolve_expression(self, *args, **kwargs):
res = super(GeoFunc, self).resolve_expression(*args, **kwargs)
base_srid = res.srid
if not base_srid:
raise TypeError("Geometry functions can only operate on geometric content.")
for pos, expr in enumerate(res.source_expressions[1:], start=1):
if isinstance(expr, GeomValue) and expr.srid != base_srid:
# Automatic SRID conversion so objects are comparable
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, str(check_types))
)
return value
class GeomValue(Value):
geography = False
@property
def srid(self):
return self.value.srid
def as_sql(self, compiler, connection):
return '%s(%%s, %s)' % (connection.ops.from_text, self.srid), [connection.ops.Adapter(self.value)]
def as_mysql(self, compiler, connection):
return '%s(%%s)' % (connection.ops.from_text), [connection.ops.Adapter(self.value)]
def as_postgresql(self, compiler, connection):
if self.geography:
self.value = connection.ops.Adapter(self.value, geography=self.geography)
else:
self.value = connection.ops.Adapter(self.value)
return super(GeomValue, self).as_sql(compiler, connection)
class GeoFuncWithGeoParam(GeoFunc):
def __init__(self, expression, geom, *expressions, **extra):
if not isinstance(geom, Geometry):
raise TypeError("Please provide a geometry object.")
if not hasattr(geom, 'srid') or not geom.srid:
raise ValueError("Please provide a geometry attribute with a defined SRID.")
super(GeoFuncWithGeoParam, self).__init__(expression, GeomValue(geom), *expressions, **extra)
class SQLiteDecimalToFloatMixin(object):
"""
    By default, Decimal values are converted to str by the SQLite backend, which
    is not acceptable to the GIS functions expecting numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super(SQLiteDecimalToFloatMixin, self).as_sql(compiler, connection)
class OracleToleranceMixin(object):
tolerance = 0.05
def as_oracle(self, compiler, connection):
tol = self.extra.get('tolerance', self.tolerance)
self.template = "%%(function)s(%%(expressions)s, %s)" % tol
return super(OracleToleranceMixin, self).as_sql(compiler, connection)
class Area(OracleToleranceMixin, GeoFunc):
output_field_class = AreaField
arity = 1
def as_sql(self, compiler, connection, **extra_context):
if connection.ops.geography:
self.output_field.area_att = 'sq_m'
else:
# Getting the area units of the geographic field.
geo_field = self.geo_field
if geo_field.geodetic(connection):
if connection.features.supports_area_geodetic:
self.output_field.area_att = 'sq_m'
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
else:
units_name = geo_field.units_name(connection)
if units_name:
self.output_field.area_att = AreaMeasure.unit_attname(units_name)
return super(Area, self).as_sql(compiler, connection, **extra_context)
def as_oracle(self, compiler, connection):
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
return super(Area, self).as_oracle(compiler, connection)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
extra_context['spheroid'] = True
return self.as_sql(compiler, connection, **extra_context)
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super(AsGeoJSON, self).__init__(*expressions, **extra)
class AsGML(GeoFunc):
geom_param_pos = 1
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(AsGML, self).__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
source_expressions = self.get_source_expressions()
version = source_expressions[0]
clone = self.copy()
clone.set_source_expressions([source_expressions[1]])
extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
return super(AsGML, clone).as_sql(compiler, connection, **extra_context)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[1:])
return clone.as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', six.integer_types),
]
super(AsSVG, self).__init__(*expressions, **extra)
class BoundingCircle(OracleToleranceMixin, GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super(BoundingCircle, self).__init__(*[expression, num_seg], **extra)
def as_oracle(self, compiler, connection):
clone = self.copy()
clone.set_source_expressions([self.get_source_expressions()[0]])
return super(BoundingCircle, clone).as_oracle(compiler, connection)
class Centroid(OracleToleranceMixin, GeoFunc):
arity = 1
class Difference(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class DistanceResultMixin(object):
def source_is_geography(self):
return self.get_source_fields()[0].geography and self.srid == 4326
def convert_value(self, value, expression, connection, context):
if value is None:
return None
geo_field = self.geo_field
if geo_field.geodetic(connection):
dist_att = 'm'
else:
units = geo_field.units_name(connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
else:
dist_att = None
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFuncWithGeoParam):
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super(Distance, self).__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if self.source_is_geography():
# Set parameters as geography if base field is geography
for pos, expr in enumerate(
self.source_expressions[self.geom_param_pos + 1:], start=self.geom_param_pos + 1):
if isinstance(expr, GeomValue):
expr.geography = True
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
# DistanceSpheroid is more accurate and resource intensive than DistanceSphere
self.function = connection.ops.spatial_function_name('DistanceSpheroid')
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
self.function = connection.ops.spatial_function_name('DistanceSphere')
return super(Distance, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if self.spheroid:
self.source_expressions.pop(2)
return super(Distance, self).as_oracle(compiler, connection)
def as_sqlite(self, compiler, connection, **extra_context):
if self.spheroid:
self.source_expressions.pop(2)
if self.geo_field.geodetic(connection):
# SpatiaLite returns NULL instead of zero on geodetic coordinates
extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
extra_context['spheroid'] = int(bool(self.spheroid))
return super(Distance, self).as_sql(compiler, connection, **extra_context)
class Envelope(GeoFunc):
arity = 1
class ForceRHR(GeoFunc):
arity = 1
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(GeoHash, self).__init__(*expressions, **extra)
class Intersection(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class IsValid(OracleToleranceMixin, GeoFunc):
output_field_class = BooleanField
def as_oracle(self, compiler, connection, **extra_context):
sql, params = super(IsValid, self).as_oracle(compiler, connection, **extra_context)
return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super(Length, self).__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super(Length, self).as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if self.source_is_geography():
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
self.function = connection.ops.spatial_function_name('LengthSpheroid')
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
self.function = connection.ops.length3d
return super(Length, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
if self.spheroid:
self.function = 'GeodesicLength'
else:
self.function = 'GreatCircleLength'
return super(Length, self).as_sql(compiler, connection)
class MakeValid(GeoFunc):
pass
class MemSize(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumGeometries(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumPoints(GeoFunc):
output_field_class = IntegerField
arity = 1
def as_sql(self, compiler, connection):
if self.source_expressions[self.geom_param_pos].output_field.geom_type != 'LINESTRING':
if not connection.features.supports_num_points_poly:
raise TypeError('NumPoints can only operate on LineString content on this database.')
return super(NumPoints, self).as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
arity = 1
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not self.source_is_geography():
raise NotImplementedError("ST_Perimeter cannot use a non-projected non-geography field.")
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
self.function = connection.ops.perimeter3d
return super(Perimeter, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
raise NotImplementedError("Perimeter cannot use a non-projected field.")
return super(Perimeter, self).as_sql(compiler, connection)
class PointOnSurface(OracleToleranceMixin, GeoFunc):
arity = 1
class Reverse(GeoFunc):
arity = 1
class Scale(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super(Scale, self).__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super(SnapToGrid, self).__init__(*expressions, **extra)
class SymDifference(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class Transform(GeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', six.integer_types),
]
if 'output_field' not in extra:
extra['output_field'] = GeometryField(srid=srid)
super(Transform, self).__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[self.geom_param_pos + 1].value
class Translate(Scale):
def as_sqlite(self, compiler, connection):
if len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate
self.source_expressions.append(Value(0))
return super(Translate, self).as_sqlite(compiler, connection)
class Union(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
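# A minimal usage sketch, not part of Django itself: it assumes a hypothetical
# GeoDjango model ``City`` with a PointField named ``point`` (passed in as an
# argument so this module stays importable without such a model existing).
def _example_nearest_cities(City, reference_point, limit=5):
    """Return the ``limit`` cities closest to ``reference_point`` (a GEOS Point
    with a defined SRID), annotated with a ``distance`` measure."""
    return (City.objects
            .annotate(distance=Distance('point', reference_point))
            .order_by('distance')[:limit])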
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dataplex_v1.services.metadata_service import pagers
from google.cloud.dataplex_v1.types import metadata_
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport
from .client import MetadataServiceClient
class MetadataServiceAsyncClient:
"""Metadata service manages metadata resources such as tables,
filesets and partitions.
"""
_client: MetadataServiceClient
DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT
entity_path = staticmethod(MetadataServiceClient.entity_path)
parse_entity_path = staticmethod(MetadataServiceClient.parse_entity_path)
partition_path = staticmethod(MetadataServiceClient.partition_path)
parse_partition_path = staticmethod(MetadataServiceClient.parse_partition_path)
zone_path = staticmethod(MetadataServiceClient.zone_path)
parse_zone_path = staticmethod(MetadataServiceClient.parse_zone_path)
common_billing_account_path = staticmethod(
MetadataServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
MetadataServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(MetadataServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
MetadataServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
MetadataServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
MetadataServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(MetadataServiceClient.common_project_path)
parse_common_project_path = staticmethod(
MetadataServiceClient.parse_common_project_path
)
common_location_path = staticmethod(MetadataServiceClient.common_location_path)
parse_common_location_path = staticmethod(
MetadataServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return MetadataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> MetadataServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MetadataServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, MetadataServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the metadata service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MetadataServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = MetadataServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
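    # A minimal construction sketch (illustrative, not part of the generated
    # surface): with Application Default Credentials available the defaults
    # usually suffice, e.g.
    #
    #     client = MetadataServiceAsyncClient()
    #
    # or, with explicit service account credentials (the path is hypothetical):
    #
    #     client = MetadataServiceAsyncClient.from_service_account_file(
    #         "service-account.json")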
async def create_entity(
self,
request: Union[metadata_.CreateEntityRequest, dict] = None,
*,
parent: str = None,
entity: metadata_.Entity = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Create a metadata entity.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_create_entity():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
entity = dataplex_v1.Entity()
entity.id = "id_value"
entity.type_ = "FILESET"
entity.asset = "asset_value"
entity.data_path = "data_path_value"
entity.system = "BIGQUERY"
entity.format_.mime_type = "mime_type_value"
entity.schema.user_managed = True
request = dataplex_v1.CreateEntityRequest(
parent="parent_value",
entity=entity,
)
# Make the request
response = client.create_entity(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.CreateEntityRequest, dict]):
The request object. Create a metadata entity request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity (:class:`google.cloud.dataplex_v1.types.Entity`):
Required. Entity resource.
This corresponds to the ``entity`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.CreateEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entity is not None:
request.entity = entity
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_entity(
self,
request: Union[metadata_.UpdateEntityRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Update a metadata entity. Only supports full resource
update.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_update_entity():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
entity = dataplex_v1.Entity()
entity.id = "id_value"
entity.type_ = "FILESET"
entity.asset = "asset_value"
entity.data_path = "data_path_value"
entity.system = "BIGQUERY"
entity.format_.mime_type = "mime_type_value"
entity.schema.user_managed = True
request = dataplex_v1.UpdateEntityRequest(
entity=entity,
)
# Make the request
response = client.update_entity(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.UpdateEntityRequest, dict]):
The request object. Update a metadata entity request.
                The existing entity will be fully replaced by the entity
in the request. The entity ID is mutable. To modify the
ID, use the current entity ID in the request URL and
specify the new ID in the request body.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
request = metadata_.UpdateEntityRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entity.name", request.entity.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_entity(
self,
request: Union[metadata_.DeleteEntityRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Delete a metadata entity.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_delete_entity():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.DeleteEntityRequest(
name="name_value",
etag="etag_value",
)
# Make the request
client.delete_entity(request=request)
Args:
request (Union[google.cloud.dataplex_v1.types.DeleteEntityRequest, dict]):
The request object. Delete a metadata entity request.
name (:class:`str`):
Required. The resource name of the entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.DeleteEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def get_entity(
self,
request: Union[metadata_.GetEntityRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Get a metadata entity.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_get_entity():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.GetEntityRequest(
name="name_value",
)
# Make the request
response = client.get_entity(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.GetEntityRequest, dict]):
The request object. Get metadata entity request.
name (:class:`str`):
Required. The resource name of the entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.GetEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entity,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_entities(
self,
request: Union[metadata_.ListEntitiesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntitiesAsyncPager:
r"""List metadata entities in a zone.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_list_entities():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.ListEntitiesRequest(
parent="parent_value",
view="FILESETS",
)
# Make the request
page_result = client.list_entities(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.ListEntitiesRequest, dict]):
The request object. List metadata entities request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListEntitiesAsyncPager:
List metadata entities response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.ListEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entities,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntitiesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
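# A minimal usage sketch for the async pager returned above (placeholder
# resource names; assumes default application credentials are available):
#
#     client = dataplex_v1.MetadataServiceAsyncClient()
#     request = dataplex_v1.ListEntitiesRequest(
#         parent="projects/p/locations/l/lakes/lake/zones/zone",
#         view="FILESETS",
#     )
#     page_result = await client.list_entities(request=request)
#     async for entity in page_result:
#         print(entity.name)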
async def create_partition(
self,
request: Union[metadata_.CreatePartitionRequest, dict] = None,
*,
parent: str = None,
partition: metadata_.Partition = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Partition:
r"""Create a metadata partition.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_create_partition():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
partition = dataplex_v1.Partition()
partition.values = ['values_value_1', 'values_value_2']
partition.location = "location_value"
request = dataplex_v1.CreatePartitionRequest(
parent="parent_value",
partition=partition,
)
# Make the request
response = client.create_partition(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.CreatePartitionRequest, dict]):
The request object. Create metadata partition request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
partition (:class:`google.cloud.dataplex_v1.types.Partition`):
Required. Partition resource.
This corresponds to the ``partition`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Partition:
Represents partition metadata
contained within entity instances.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, partition])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.CreatePartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if partition is not None:
request.partition = partition
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_partition,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_partition(
self,
request: Union[metadata_.DeletePartitionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Delete a metadata partition.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_delete_partition():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.DeletePartitionRequest(
name="name_value",
)
# Make the request
client.delete_partition(request=request)
Args:
request (Union[google.cloud.dataplex_v1.types.DeletePartitionRequest, dict]):
The request object. Delete metadata partition request.
name (:class:`str`):
Required. The resource name of the partition. format:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
The {partition_value_path} segment consists of an
ordered sequence of partition values separated by "/".
All values must be provided.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.DeletePartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_partition,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def get_partition(
self,
request: Union[metadata_.GetPartitionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Partition:
r"""Get a metadata partition of an entity.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_get_partition():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.GetPartitionRequest(
name="name_value",
)
# Make the request
response = client.get_partition(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.GetPartitionRequest, dict]):
The request object. Get metadata partition request.
name (:class:`str`):
Required. The resource name of the partition:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
The {partition_value_path} segment consists of an
ordered sequence of partition values separated by "/".
All values must be provided.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Partition:
Represents partition metadata
contained within entity instances.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.GetPartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_partition,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_partitions(
self,
request: Union[metadata_.ListPartitionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPartitionsAsyncPager:
r"""List metadata partitions of an entity.
.. code-block:: python
from google.cloud import dataplex_v1
def sample_list_partitions():
# Create a client
client = dataplex_v1.MetadataServiceClient()
# Initialize request argument(s)
request = dataplex_v1.ListPartitionsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_partitions(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dataplex_v1.types.ListPartitionsRequest, dict]):
The request object. List metadata partitions request.
parent (:class:`str`):
Required. The resource name of the parent entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListPartitionsAsyncPager:
List metadata partitions response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.ListPartitionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_partitions,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPartitionsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-dataplex",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("MetadataServiceAsyncClient",)
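# A minimal end-to-end sketch of driving this async client from an event loop
# (the entity resource name is a placeholder; assumes default application
# credentials):
#
#     import asyncio
#     from google.cloud import dataplex_v1
#
#     async def main():
#         client = dataplex_v1.MetadataServiceAsyncClient()
#         entity = await client.get_entity(
#             name="projects/p/locations/l/lakes/lake/zones/zone/entities/e"
#         )
#         print(entity.name)
#
#     asyncio.run(main())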
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: rpc.py
"""RPC Implemention, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
has only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| SocketServer.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import sys
import os
import socket
import select
import SocketServer
import struct
import cPickle as pickle
import threading
import Queue
import traceback
import copy_reg
import types
import marshal
def unpickle_code(ms):
co = marshal.loads(ms)
return co
def pickle_code(co):
ms = marshal.dumps(co)
return (
unpickle_code, (ms,))
copy_reg.pickle(types.CodeType, pickle_code, unpickle_code)
BUFSIZE = 8 * 1024
LOCALHOST = '127.0.0.1'
class RPCServer(SocketServer.TCPServer):
def __init__(self, addr, handlerclass=None):
if handlerclass is None:
handlerclass = RPCHandler
SocketServer.TCPServer.__init__(self, addr, handlerclass)
return
def server_bind(self):
"""Override TCPServer method, no bind() phase for connecting entity"""
pass
def server_activate(self):
"""Override TCPServer method, connect() instead of listen()
Due to the reversed connection, self.server_address is actually the
address of the Idle Client to which we are connecting.
"""
self.socket.connect(self.server_address)
def get_request(self):
"""Override TCPServer method, return already connected socket"""
return (
self.socket, self.server_address)
def handle_error(self, request, client_address):
"""Override TCPServer method
Error message goes to __stderr__. No error message if exiting
normally or socket raised EOF. Other exceptions not handled in
server code will cause os._exit.
"""
try:
raise
except SystemExit:
raise
except:
erf = sys.__stderr__
print >> erf, '\n' + '-' * 40
print >> erf, 'Unhandled server exception!'
print >> erf, 'Thread: %s' % threading.currentThread().getName()
print >> erf, 'Client Address: ', client_address
print >> erf, 'Request: ', repr(request)
traceback.print_exc(file=erf)
print >> erf, '\n*** Unrecoverable, server exiting!'
print >> erf, '-' * 40
os._exit(0)
objecttable = {}
request_queue = Queue.Queue(0)
response_queue = Queue.Queue(0)
class SocketIO(object):
nextseq = 0
def __init__(self, sock, objtable=None, debugging=None):
self.sockthread = threading.currentThread()
if debugging is not None:
self.debugging = debugging
self.sock = sock
if objtable is None:
objtable = objecttable
self.objtable = objtable
self.responses = {}
self.cvars = {}
return
def close(self):
sock = self.sock
self.sock = None
if sock is not None:
sock.close()
return
def exithook(self):
"""override for specific exit action"""
os._exit(0)
def debug(self, *args):
if not self.debugging:
return
s = self.location + ' ' + str(threading.currentThread().getName())
for a in args:
s = s + ' ' + str(a)
print >> sys.__stderr__, s
def register(self, oid, object):
self.objtable[oid] = object
def unregister(self, oid):
try:
del self.objtable[oid]
except KeyError:
pass
def localcall(self, seq, request):
self.debug('localcall:', request)
try:
how, (oid, methodname, args, kwargs) = request
except TypeError:
return ('ERROR', 'Bad request format')
if oid not in self.objtable:
return ('ERROR', 'Unknown object id: %r' % (oid,))
else:
obj = self.objtable[oid]
if methodname == '__methods__':
methods = {}
_getmethods(obj, methods)
return (
'OK', methods)
if methodname == '__attributes__':
attributes = {}
_getattributes(obj, attributes)
return (
'OK', attributes)
if not hasattr(obj, methodname):
return ('ERROR', 'Unsupported method name: %r' % (methodname,))
method = getattr(obj, methodname)
try:
if how == 'CALL':
ret = method(*args, **kwargs)
if isinstance(ret, RemoteObject):
ret = remoteref(ret)
return ('OK', ret)
if how == 'QUEUE':
request_queue.put((seq, (method, args, kwargs)))
return ('QUEUED', None)
return (
'ERROR', 'Unsupported message type: %s' % how)
except SystemExit:
raise
except socket.error:
raise
except:
msg = '*** Internal Error: rpc.py:SocketIO.localcall()\n\n Object: %s \n Method: %s \n Args: %s\n'
print >> sys.__stderr__, msg % (oid, method, args)
traceback.print_exc(file=sys.__stderr__)
return ('EXCEPTION', None)
return None
def remotecall(self, oid, methodname, args, kwargs):
self.debug('remotecall:asynccall: ', oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug('remotequeue:asyncqueue: ', oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = (
'CALL', (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug('asynccall:%d:' % seq, oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = (
'QUEUE', (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug('asyncqueue:%d:' % seq, oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncreturn(self, seq):
self.debug('asyncreturn:%d:call getresponse(): ' % seq)
response = self.getresponse(seq, wait=0.05)
self.debug('asyncreturn:%d:response: ' % seq, response)
return self.decoderesponse(response)
def decoderesponse(self, response):
how, what = response
if how == 'OK':
return what
else:
if how == 'QUEUED':
return None
if how == 'EXCEPTION':
self.debug('decoderesponse: EXCEPTION')
return None
if how == 'EOF':
self.debug('decoderesponse: EOF')
self.decode_interrupthook()
return None
if how == 'ERROR':
self.debug('decoderesponse: Internal ERROR:', what)
raise RuntimeError, what
raise SystemError, (how, what)
return None
def decode_interrupthook(self):
""""""
raise EOFError
def mainloop(self):
"""Listen on socket until I/O not ready or EOF
pollresponse() will loop looking for seq number None, which
never comes, and exit on EOFError.
"""
try:
self.getresponse(myseq=None, wait=0.05)
except EOFError:
self.debug('mainloop:return')
return
return
def getresponse(self, myseq, wait):
response = self._getresponse(myseq, wait)
if response is not None:
how, what = response
if how == 'OK':
response = (
how, self._proxify(what))
return response
def _proxify(self, obj):
if isinstance(obj, RemoteProxy):
return RPCProxy(self, obj.oid)
if isinstance(obj, types.ListType):
return map(self._proxify, obj)
return obj
def _getresponse(self, myseq, wait):
self.debug('_getresponse:myseq:', myseq)
if threading.currentThread() is self.sockthread:
while 1:
response = self.pollresponse(myseq, wait)
if response is not None:
return response
else:
cvar = self.cvars[myseq]
cvar.acquire()
while myseq not in self.responses:
cvar.wait()
response = self.responses[myseq]
self.debug('_getresponse:%s: thread woke up: response: %s' % (
myseq, response))
del self.responses[myseq]
del self.cvars[myseq]
cvar.release()
return response
return
def newseq(self):
self.nextseq = seq = self.nextseq + 2
return seq
def putmessage(self, message):
self.debug('putmessage:%d:' % message[0])
try:
s = pickle.dumps(message)
except pickle.PicklingError:
print >> sys.__stderr__, 'Cannot pickle:', repr(message)
raise
s = struct.pack('<i', len(s)) + s
while len(s) > 0:
try:
r, w, x = select.select([], [self.sock], [])
n = self.sock.send(s[:BUFSIZE])
except (AttributeError, TypeError):
raise IOError, 'socket no longer exists'
except socket.error:
raise
else:
s = s[n:]
buffer = ''
bufneed = 4
bufstate = 0
def pollpacket(self, wait):
self._stage0()
if len(self.buffer) < self.bufneed:
r, w, x = select.select([self.sock.fileno()], [], [], wait)
if len(r) == 0:
return None
try:
s = self.sock.recv(BUFSIZE)
except socket.error:
raise EOFError
if len(s) == 0:
raise EOFError
self.buffer += s
self._stage0()
return self._stage1()
def _stage0(self):
if self.bufstate == 0 and len(self.buffer) >= 4:
s = self.buffer[:4]
self.buffer = self.buffer[4:]
self.bufneed = struct.unpack('<i', s)[0]
self.bufstate = 1
def _stage1(self):
if self.bufstate == 1 and len(self.buffer) >= self.bufneed:
packet = self.buffer[:self.bufneed]
self.buffer = self.buffer[self.bufneed:]
self.bufneed = 4
self.bufstate = 0
return packet
def pollmessage(self, wait):
packet = self.pollpacket(wait)
if packet is None:
return
else:
try:
message = pickle.loads(packet)
except pickle.UnpicklingError:
print >> sys.__stderr__, '-----------------------'
print >> sys.__stderr__, 'cannot unpickle packet:', repr(packet)
traceback.print_stack(file=sys.__stderr__)
print >> sys.__stderr__, '-----------------------'
raise
return message
def pollresponse(self, myseq, wait):
    """Handle messages received on the socket.

    Some messages received may be asynchronous 'call' or 'queue' requests;
    others are responses intended either for this thread (seq == myseq) or
    for other threads waiting on a condition variable.
    """
    while 1:
        # send a queued response across the link if one is available
        try:
            qmsg = response_queue.get(0)
        except Queue.Empty:
            pass
        else:
            seq, response = qmsg
            message = (seq, ('OK', response))
            self.putmessage(message)
        # poll for a message on the link
        try:
            message = self.pollmessage(wait)
            if message is None:
                # socket not ready
                return None
        except EOFError:
            self.handle_EOF()
            return None
        except AttributeError:
            return None
        seq, resq = message
        how = resq[0]
        self.debug('pollresponse:%d:myseq:%s' % (seq, myseq))
        # process or queue an incoming request
        if how in ('CALL', 'QUEUE'):
            self.debug('pollresponse:%d:localcall:call:' % seq)
            response = self.localcall(seq, resq)
            self.debug('pollresponse:%d:localcall:response:%s' % (seq, response))
            if how == 'CALL':
                self.putmessage((seq, response))
            # 'QUEUE' requests are not acknowledged here; the response is
            # sent later, once the queued request has been processed.
            continue
        if seq == myseq:
            # the response this caller has been waiting for
            return resq
        # must be a response for a different thread
        cv = self.cvars.get(seq, None)
        # a response with an unknown sequence number is discarded,
        # probably intended for a prior incarnation of the server
        if cv is not None:
            cv.acquire()
            self.responses[seq] = resq
            cv.notify()
            cv.release()
        continue
def handle_EOF(self):
"""action taken upon link being closed by peer"""
self.EOFhook()
self.debug('handle_EOF')
for key in self.cvars:
cv = self.cvars[key]
cv.acquire()
self.responses[key] = ('EOF', None)
cv.notify()
cv.release()
self.exithook()
return None
def EOFhook(self):
"""Classes using rpc client/server can override to augment EOF action"""
pass
class RemoteObject(object):
pass
def remoteref(obj):
oid = id(obj)
objecttable[oid] = obj
return RemoteProxy(oid)
class RemoteProxy(object):
def __init__(self, oid):
self.oid = oid
class RPCHandler(SocketServer.BaseRequestHandler, SocketIO):
debugging = False
location = '#S'
def __init__(self, sock, addr, svr):
svr.current_handler = self
SocketIO.__init__(self, sock)
SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr)
def handle(self):
"""handle() method required by SocketServer"""
self.mainloop()
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCClient(SocketIO):
debugging = False
location = '#C'
nextseq = 1
def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
self.listening_sock = socket.socket(family, type)
self.listening_sock.bind(address)
self.listening_sock.listen(1)
def accept(self):
working_sock, address = self.listening_sock.accept()
if self.debugging:
print >> sys.__stderr__, '****** Connection request from ', address
if address[0] == LOCALHOST:
SocketIO.__init__(self, working_sock)
else:
print >> sys.__stderr__, '** Invalid host: ', address
raise socket.error
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCProxy(object):
__methods = None
__attributes = None
def __init__(self, sockio, oid):
self.sockio = sockio
self.oid = oid
def __getattr__(self, name):
if self.__methods is None:
self.__getmethods()
if self.__methods.get(name):
return MethodProxy(self.sockio, self.oid, name)
else:
if self.__attributes is None:
self.__getattributes()
if name in self.__attributes:
value = self.sockio.remotecall(self.oid, '__getattribute__', (
name,), {})
return value
raise AttributeError, name
return
def __getattributes(self):
self.__attributes = self.sockio.remotecall(self.oid, '__attributes__', (), {})
def __getmethods(self):
self.__methods = self.sockio.remotecall(self.oid, '__methods__', (), {})
def _getmethods(obj, methods):
for name in dir(obj):
attr = getattr(obj, name)
if hasattr(attr, '__call__'):
methods[name] = 1
if type(obj) == types.InstanceType:
_getmethods(obj.__class__, methods)
if type(obj) == types.ClassType:
for super in obj.__bases__:
_getmethods(super, methods)
def _getattributes(obj, attributes):
for name in dir(obj):
attr = getattr(obj, name)
if not hasattr(attr, '__call__'):
attributes[name] = 1
class MethodProxy(object):
def __init__(self, sockio, oid, name):
self.sockio = sockio
self.oid = oid
self.name = name
def __call__(self, *args, **kwargs):
value = self.sockio.remotecall(self.oid, self.name, args, kwargs)
return value
|
|
from collections import deque, OrderedDict
import logging
import queue
import signal
import threading
import time
from .sqs import SQSDeleteThread, SQSReceiveThread
logger = logging.getLogger("zentral.core.queues.backends.aws_sns_sqs.consumer")
# BaseConsumer
class BaseConsumer:
def __init__(self, queue_url, client_kwargs=None):
if client_kwargs is None:
client_kwargs = {}
self.process_message_queue = queue.Queue(maxsize=15)
self.delete_message_queue = queue.Queue(maxsize=15)
self.stop_receiving_event = threading.Event()
self.stop_event = threading.Event()
self._threads = [
SQSReceiveThread(queue_url, self.stop_receiving_event, self.process_message_queue, client_kwargs),
SQSDeleteThread(queue_url, self.stop_event, self.delete_message_queue, client_kwargs)
]
def _handle_signal(self, signum, frame):
if signum == signal.SIGTERM:
signum = "SIGTERM"
elif signum == signal.SIGINT:
signum = "SIGINT"
logger.debug("received signal %s", signum)
if not self.stop_receiving_event.is_set():
logger.error("signal %s - stop receiving events", signum)
self.stop_receiving_event.set()
def run(self, *args, **kwargs):
exit_status = 0
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
for thread in self._threads:
thread.start()
try:
self.start_run_loop()
except Exception:
exit_status = 1
logger.exception("%s: run loop exception", self.name)
if not self.stop_receiving_event.is_set():
logger.error("%s: stop receiving", self.name)
self.stop_receiving_event.set()
# graceful stop
if not self.stop_event.is_set():
logger.error("Set stop event")
self.stop_event.set()
for thread in self._threads:
thread.join()
logger.error("All threads stopped.")
return exit_status
def start_run_loop(self):
raise NotImplementedError
def skip_event(self, receipt_handle, event_d):
# to override in the sub-classes if necessary
return False
# Consumer
class Consumer(BaseConsumer):
def start_run_loop(self):
while True:
try:
receipt_handle, routing_key, event_d = self.process_message_queue.get(block=True, timeout=1)
except queue.Empty:
logger.debug("no new event to process")
if self.stop_receiving_event.is_set():
break
else:
if self.skip_event(receipt_handle, event_d):
logger.debug("receipt handle %s: event skipped", receipt_handle[-7:])
else:
logger.debug("receipt handle %s: process new event", receipt_handle[-7:])
self.process_event(routing_key, event_d)
logger.debug("receipt handle %s: queue for deletion", receipt_handle[-7:])
self.delete_message_queue.put((receipt_handle, time.monotonic()))
def process_event(self, routing_key, event_d):
# to be implemented in the sub-classes
raise NotImplementedError
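# A minimal sketch of a concrete Consumer (hypothetical names; the payload
# shape is an assumption), showing the only method a subclass must provide.
# process_event() is called once per received message; if it returns normally
# the message is queued for deletion, and an exception aborts the run loop:
#
#     class LoggingConsumer(Consumer):
#         def process_event(self, routing_key, event_d):
#             logger.info("event %s: %s", routing_key, event_d.get("_zentral"))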
# ConcurrentConsumer
class ConcurrentConsumerFinalThread(threading.Thread):
def __init__(self, concurrent_consumer):
self.processed_event_queue = concurrent_consumer.processed_event_queue
self.delete_message_queue = concurrent_consumer.delete_message_queue
self.stop_event = concurrent_consumer.stop_event
self.update_metrics_cb = concurrent_consumer.update_metrics
super().__init__(name="ConcurrentConsumer final thread")
def run(self):
while True:
try:
receipt_handle, success, event_type, process_time = self.processed_event_queue.get(block=True,
timeout=1)
except queue.Empty:
logger.debug("[%s] no new processed event", self.name)
if self.stop_event.is_set():
logger.info("[%s] graceful exit", self.name)
break
else:
if success:
logger.debug("[%s] receipt handle %s: new processed event", self.name, receipt_handle[-7:])
self.delete_message_queue.put((receipt_handle, time.monotonic()))
else:
logger.error("[%s] receipt handle %s: could not process event", self.name, receipt_handle[-7:])
self.update_metrics_cb(success, event_type, process_time)
class ConcurrentConsumer(BaseConsumer):
def __init__(self, queue_url, concurrency, client_kwargs=None):
super().__init__(queue_url, client_kwargs)
self.concurrency = concurrency
self.process_event_queue = queue.Queue(maxsize=concurrency)
self.processed_event_queue = queue.Queue(maxsize=concurrency)
process_thread_constructor = self.get_process_thread_constructor()
for i in range(concurrency):
self._threads.append(
process_thread_constructor(
i + 1,
self.process_event_queue,
self.processed_event_queue,
self.stop_event
)
)
self._threads.append(ConcurrentConsumerFinalThread(self))
def start_run_loop(self):
while True:
try:
receipt_handle, routing_key, event_d = self.process_message_queue.get(block=True, timeout=1)
except queue.Empty:
logger.debug("no new event to process")
if self.stop_receiving_event.is_set():
break
else:
if self.skip_event(receipt_handle, event_d):
logger.debug("receipt handle %s: event skipped", receipt_handle[-7:])
self.delete_message_queue.put((receipt_handle, time.monotonic()))
else:
logger.debug("receipt handle %s: queue new event", receipt_handle[-7:])
self.process_event_queue.put((receipt_handle, routing_key, event_d))
# BatchConsumer
class BatchConsumer(BaseConsumer):
max_event_age_seconds = 5
def __init__(self, queue_url, batch_size, client_kwargs=None):
super().__init__(queue_url, client_kwargs)
self.batch_size = batch_size
self.batch = deque()
self.batch_start_ts = None
def start_run_loop(self):
while True:
try:
receipt_handle, routing_key, event_d = self.process_message_queue.get(block=True, timeout=1)
except queue.Empty:
logger.debug("no new event to process")
if self.batch:
if self.stop_receiving_event.is_set():
logger.debug("process events before graceful exit")
self._process_batch()
elif time.monotonic() > self.batch_start_ts + self.max_event_age_seconds:
logger.debug("process events because max event age reached")
self._process_batch()
if self.stop_receiving_event.is_set():
break
else:
if self.skip_event(receipt_handle, event_d):
logger.debug("receipt handle %s: event skipped", receipt_handle[-7:])
self.delete_message_queue.put((receipt_handle, time.monotonic()))
if self.batch and time.monotonic() > self.batch_start_ts + self.max_event_age_seconds:
logger.debug("process events because max event age reached")
self._process_batch()
else:
logger.debug("receipt handle %s: queue new event for batch processing", receipt_handle[-7:])
self.batch.append((receipt_handle, routing_key, event_d))
if self.batch_start_ts is None:
self.batch_start_ts = time.monotonic()
if len(self.batch) >= self.batch_size:
self._process_batch()
def _process_batch(self):
for receipt_handle in self.process_events(self.batch):
self.delete_message_queue.put((receipt_handle, time.monotonic()))
self.batch_start_ts = None
def process_events(self, batch):
# to be implemented in the sub-classes
# must be an iterator yielding the receipt handles to acknowledge
raise NotImplementedError
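# A minimal sketch of a concrete BatchConsumer (hypothetical names). Note that
# _process_batch() passes the internal deque itself, so the implementation
# pops events as it processes them and yields each receipt handle to
# acknowledge:
#
#     class BulkStoreConsumer(BatchConsumer):
#         def process_events(self, batch):
#             while batch:
#                 receipt_handle, routing_key, event_d = batch.popleft()
#                 store(event_d)  # hypothetical storage helper
#                 yield receipt_handle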
# ConsumerProducer
class ConsumerProducerFinalThread(threading.Thread):
def __init__(self, consumer_producer):
self.stop_event = consumer_producer.stop_event
self.in_queue = consumer_producer.published_message_queue
self.out_queue = consumer_producer.delete_message_queue
self.callback = consumer_producer.decrement_receipt_handle_unpublished_event_count
super().__init__(name="Consumer/Producer final thread")
def run(self):
while True:
try:
receipt_handle = self.in_queue.get(block=True, timeout=1)
except queue.Empty:
logger.debug("[%s] no new published event", self.name)
if self.stop_event.is_set():
logger.info("[%s] graceful exit", self.name)
break
else:
logger.debug("[%s] receipt handle %s: new published event", self.name, receipt_handle[-7:])
if self.callback(receipt_handle):
logger.debug("[%s] receipt handle %s: no more unpublished events", self.name, receipt_handle[-7:])
self.out_queue.put((receipt_handle, time.monotonic()))
else:
logger.debug("[%s] receipt handle %s: still waiting for some unpublished events",
self.name, receipt_handle[-7:])
class ConsumerProducer(BaseConsumer):
max_in_flight_receipt_handle_count = 100
def __init__(self, queue_url, client_kwargs=None):
super().__init__(queue_url, client_kwargs)
self.publish_message_queue = queue.Queue(maxsize=20)
self.published_message_queue = queue.Queue(maxsize=20)
self.in_flight_receipt_handles_lock = threading.RLock()
self.in_flight_receipt_handles = OrderedDict()
self._threads.append(ConsumerProducerFinalThread(self))
def increment_receipt_handle_unpublished_event_count(self, receipt_handle):
logger.debug("receipt handle %s: increment unpublished event count", receipt_handle[-7:])
new_count = self.in_flight_receipt_handles.get(receipt_handle, 0) + 1
logger.debug("receipt handle %s: %d (+1) unpublished events", receipt_handle[-7:], new_count)
self.in_flight_receipt_handles[receipt_handle] = new_count
self.in_flight_receipt_handles.move_to_end(receipt_handle)
# do some maintenance, to avoid memory issues in case of a bug
in_flight_receipt_handle_count = len(self.in_flight_receipt_handles)
logger.debug("%d in-flight receipt handles", in_flight_receipt_handle_count)
if in_flight_receipt_handle_count > self.max_in_flight_receipt_handle_count:
# need to purge the older receipt handles
logger.error("%d > %d in-flight receipt handles",
in_flight_receipt_handle_count,
self.max_in_flight_receipt_handle_count)
for _ in range(in_flight_receipt_handle_count - self.max_in_flight_receipt_handle_count):
k, v = self.in_flight_receipt_handles.popitem(last=False)
logger.error("receipt handle %s with %s in-flight events evicted", k, v)
def decrement_receipt_handle_unpublished_event_count(self, receipt_handle):
logger.debug("receipt handle %s: decrement unpublished event count", receipt_handle[-7:])
with self.in_flight_receipt_handles_lock:
try:
current_count = self.in_flight_receipt_handles[receipt_handle]
except KeyError:
logger.error("unknown receipt handle %s!", receipt_handle[-7:])
return False
new_count = current_count - 1
logger.debug("receipt handle %s: %d (-1) unpublished events", receipt_handle[-7:], new_count)
if new_count <= 0:
if new_count < 0:
logger.error("receipt handle %s: %d < 0 unpublished events", receipt_handle[-7:], new_count)
logger.debug("receipt handle %s: no more unpublished events", receipt_handle[-7:])
del self.in_flight_receipt_handles[receipt_handle]
logger.debug("%d in-flight receipt handles", len(self.in_flight_receipt_handles))
return True
else:
self.in_flight_receipt_handles[receipt_handle] = new_count
self.in_flight_receipt_handles.move_to_end(receipt_handle)
return False
def start_run_loop(self):
while True:
try:
receipt_handle, routing_key, event_d = self.process_message_queue.get(block=True, timeout=1)
except queue.Empty:
logger.debug("no new event to process")
if self.stop_receiving_event.is_set():
break
else:
logger.debug("receipt handle %s: process new event", receipt_handle[-7:])
generated_event_count = 0
for new_routing_key, new_event_d in self.generate_events(routing_key, event_d):
with self.in_flight_receipt_handles_lock:
self.increment_receipt_handle_unpublished_event_count(receipt_handle)
self.publish_message_queue.put((receipt_handle, new_routing_key, new_event_d, time.monotonic()))
generated_event_count += 1
if not generated_event_count:
logger.debug("receipt handle %s: no events to publish, queue for deletion", receipt_handle[-7:])
self.delete_message_queue.put((receipt_handle, time.monotonic()))
def generate_events(self, routing_key, event_d):
# must return an iterable over the generated events,
# i.e. (new_routing_key, new_event_d) tuples
raise NotImplementedError
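# A minimal sketch of a concrete ConsumerProducer (hypothetical names): for
# each incoming event, generate_events() yields (new_routing_key, new_event_d)
# tuples that start_run_loop() will queue for publishing:
#
#     class EnrichConsumerProducer(ConsumerProducer):
#         def generate_events(self, routing_key, event_d):
#             for enriched_event_d in enrich(event_d):  # hypothetical helper
#                 yield "enriched_events", enriched_event_d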
|
|
#!/usr/bin/python2.6
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Client for Processing MQL queries.
This class exposes two main services, read() and write(), which perform
mql reads and mql writes. Both take any varenv params as keyword args.
(There are no envelopes here).
See http://www.freebase.com/docs/web_services for more information
"""
__author__ = "rtp@google.com (Tyler Pirtle)"
import collections
import copy
import logging
import time
from mql import error as mql_error
from mql.graph import TcpGraphConnector
from mql.hijson import HighQuery
from mql.lojson import LowQuery
class InvalidGraphAddr(Exception):
pass
class MQLService(object):
"""Entry point for making MQL requests to the graph.
See google3/metaweb/freebase/api, which provides
the stubby interface for mql queries. It should be
the only user of this library.
"""
dollar_keys = [
"user", "privileged", "lang", "permission", "authority", "attribution"
]
MQLResult = collections.namedtuple("MQLResult", "result cost dateline cursor")
def _fix_varenv(self, env):
"""Make a copy of self.varenv, update it with env."""
dollared_env = dict([
("$" + k, v) for k, v in env.items() if k in self.dollar_keys
])
not_dollared = dict([
(k, v) for k, v in env.items() if k not in self.dollar_keys
])
if "as_of_time" in not_dollared:
not_dollared["asof"] = not_dollared["as_of_time"]
del not_dollared["as_of_time"]
# externally it's "debug_token", internally in pymql it's "tid",
# and in graphd it's the "id" field. Legacy stuff.
if "debug_token" in not_dollared:
not_dollared["tid"] = not_dollared["debug_token"]
del not_dollared["debug_token"]
varenv = copy.deepcopy(self.varenv)
varenv.update(dollared_env)
varenv.update(not_dollared)
# convert 'deadline' to an absolute
# unix epoch deadline and set the var
deadline = varenv.get("deadline")
if deadline:
varenv["epoch_deadline"] = time.time() + deadline
return varenv
def __init__(self, connector=None, graphd_addrs=None):
"""Initialize a MQLService with a connector."""
self.varenv = {}
if connector is not None:
self.gc = connector
elif graphd_addrs:
addr_list = list(self._parse_graphaddr(graphd_addrs))
self.gc = TcpGraphConnector(addr_list)
else:
raise Exception("Must supply an address list or connector")
self.gc.open()
low_querier = LowQuery(self.gc)
self.high_querier = HighQuery(low_querier)
def _parse_graphaddr(self, addrs):
for g in addrs:
if isinstance(g, str):
addr = g.split(":")
if len(addr) < 2:
logging.warn("graph addr [%s] is malformed (missing :port)", g)
continue
yield (addr[0], int(addr[1]))
elif isinstance(g, tuple): # better be a tuple.
yield g
else:
raise InvalidGraphAddr(g)
def get_cost(self):
return self.gc.totalcost
def reset_costs(self):
self.gc.reset_cost()
self.high_querier.reset_cost()
def read(self, query, **varenv):
"""Initiate a read of the specified query.
Args:
query: dict/json obj, mql query
varenv: dict/json obj, options, key/vals:
  as_of_time (optional) timestamp string, e.g. "2013-01-01T00:00:00.0000"
    or less precise. graph responses will be as though the query were
    made at this time.
  cursor (optional) None/True or a string returned from a previous query
    (a query with a "limit": n directive), which allows a paging
    mechanism for the database to provide the next set of n results. To
    request the first cursor use True.
  deadline (optional) float, timeout in seconds for this request, feeds
    into epoch_deadline.
  escape (optional) boolean, default True (in effect), turns on cgi
    escaping of string values.
  lang (optional) string, lang id, default "/lang/en".
  project_id (optional) string, the project id that the request should
    use quota from.
  query_timeout_tu int or None, if provided, each resulting graph query
    will be cpu user-time constrained by this number of ms. Think of it
    as limiting the work done by the db. Note: a mql query can result in
    an arbitrarily large number of graph queries, so even a small value
    here could result in a lot of work done.
  uniqueness_failure (optional) string: 'hard' or 'soft', default 'hard'.
    If a query constraint is null or {}, 'soft' won't complain if a list
    is returned.
  write_dateline string, which must be a valid dateline returned by a
    previous mql query. In proper practice, this should be a dateline
    returned by a mqlwrite, thus the name 'write_dateline'. This dateline
    is passed to the graph db replica and requires that the replica poll
    until it is caught up to the dateline that you provide (the dateline
    represents the primitive index count, i.e. the hex value of the
    latest guid + 1). It has the effect of ensuring the replica is up to
    date with the user's last update to the database. If the replica is
    not up to date, it polls until it gets there or times out (lagging
    graphs could time out). The assumption is that the user only needs a
    level of freshness up to the last write that they did. So, the basic
    pattern is: use the write_dateline they provide for all reads, until
    they do a write, and then return them a new dateline.
    see: go/graphd-dateline
  debug_token (optional) string: a unique string to aid in debugging
    requests.
  DEPRECATED: normalize, extended
Returns:
response: json object, query result
cost: dict, of various cost key/val pairs
dateline: string, see description of write_dateline above
not sure how this, the latest dateline received, is
being used. frapi doesn't pass it on. In proper use
the user should only need a new dateline when doing
a mqlwrite.
cursor: string, a cursor to be used in subsequent paging
queries.
Raises: various exceptions
"""
self.reset_costs()
env = self._fix_varenv(varenv)
if env.get("cursor"):
query = sort_query_keys(query)
logging.debug("pymql.read.start env: %s query: %s", env, query)
r = self.high_querier.read(query, env)
cost = self.get_cost()
logging.debug("pymql.read.end env: %s cost: %s", env, cost.items())
result = self.MQLResult(r, cost, env.get("dateline"), env.get("cursor"))
return result
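# A minimal paging sketch for read() (illustrative graphd address and query;
# consume() is a hypothetical callback):
#
#     mql = MQLService(graphd_addrs=["graphd.example.com:8100"])
#     cursor = True  # request the first cursor
#     while cursor:
#         res = mql.read(
#             [{"id": None, "type": "/music/artist", "limit": 100}],
#             cursor=cursor)
#         consume(res.result)
#         cursor = res.cursor  # falsy once the result set is exhausted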
def write(self, query, **varenv):
"""Initiate a write of the specified query using the GraphConnector.
Args:
query: dict/json obj, mql query
varenv: dict/json obj, options, key/vals:
  attribution (optional) string, id of freebase attribution object. This
    will be written as the attribution link for primitives written.
  authority (optional) object. Allows requests to be made with the
    attribution of the current user, but using the permissions of the
    user specified by this param. Another dangerous one; improper
    exposure to the outside world could result in unwanted escalation of
    privileges.
  deadline (optional) float, timeout in seconds for this request, feeds
    into epoch_deadline.
  escape (optional) boolean, default True (in effect), turns on cgi
    escaping of string values.
  lang (optional) string, lang id, default "/lang/en".
  permission (optional) string, id of freebase permission object. This
    param should only be used for certain low-level operations by members
    of the Freebase/Metaweb team.
  privileged (optional) object, this object when passed as the privileged
    field enables you to pass another user id as the authority field. The
    write will still be attributed to 'user', but 'authority' permissions
    will be checked in addition to 'user' permissions.
  project_id (optional) string, the project id that the request should
    use quota from.
  query_timeout_tu int or None, if provided, all graph queries will be
    cpu user-time constrained by this number of ms. Think of it as
    limiting the work done by the db. Note: a mql query can result in an
    arbitrarily large number of graph queries, so even a small value here
    could result in a lot of work done.
  user (required) string, freebase user id e.g. "/user/brendan".
  write_dateline string, see description in the read method. Datelines
    have the same effect on writes: they ensure the db replica you are
    talking to is up to date with the user's last write state before
    doing the write. This is necessary since many graph reads happen as
    part of a write, i.e. the write may require data from a previous
    write in order to complete correctly. see go/graphd-dateline
  debug_token (optional) string: a unique string to aid in debugging
    requests.
  DEPRECATED: normalize, extended
Returns:
response: json object, query result
cost: dict, of various cost key/val pairs
dateline: string, see description of write_dateline above
cursor: string, a cursor to be used in subsequent paging
queries.
Raises: various exceptions
"""
self.reset_costs()
env = self._fix_varenv(varenv)
if "$user" not in env:
raise mql_error.MQLAccessError(
None, "You need to specify a user to write with.")
logging.debug("pymql.write.start env: %s query: %s", env, query)
r = self.high_querier.write(query, env)
cost = self.get_cost()
logging.debug("pymql.write.end env: %s cost: %s", env, cost.items())
result = self.MQLResult(r, cost, env.get("write_dateline"),
env.get("cursor"))
return result
def normalize(self, query):
"""Normalize the specified query. TODO(rtp) What does this actually do?"""
self.reset_costs()
r = self.read(query, normalize_only=True)
result = self.MQLResult(r.result, r.cost, r.dateline, r.cursor)
return result
def sort_query_keys(part):
"""Recursively sort query keys.
We do this to every mqlread with a cursor because
graphd relies on GQL query string order to
maintain the state of the cursor.
This calls itself recursively, sorting keys
in any ply of the query that is a dict.
Args:
part: any ply of your query dict
Returns:
an OrderedDict where all keys have been sorted
in every ply of the query.
"""
if isinstance(part, list):
new_d = []
for item in part:
new_d.append(sort_query_keys(item))
return new_d
elif isinstance(part, dict):
new_d = collections.OrderedDict()
for k in sorted(part.keys()):
new_d[k] = sort_query_keys(part[k])
return new_d
else:
return part
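# Illustrative usage of sort_query_keys (a sketch, not part of the original
# module). Every dict ply comes back as an OrderedDict with sorted keys, so
# the serialized query string stays stable across cursor continuations:
#
#   q = {"type": "/people/person", "name": None, "id": None}
#   sort_query_keys(q)
#   # -> OrderedDict([('id', None), ('name', None), ('type', '/people/person')])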
|
|
from unittest import mock
from ddt import data, ddt
from freezegun import freeze_time
from rest_framework import status, test
from waldur_core.core import utils as core_utils
from waldur_core.logging import models as logging_models
from waldur_core.structure import models as structure_models
from waldur_core.structure.tests import fixtures
from waldur_core.structure.tests.factories import ProjectFactory, UserFactory
from waldur_mastermind.common.utils import parse_date
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.invoices.tests import factories as invoices_factories
from waldur_mastermind.marketplace import callbacks, log, models, plugins, tasks, utils
from waldur_mastermind.marketplace.tests import factories
from waldur_mastermind.marketplace.tests import utils as test_utils
from waldur_mastermind.marketplace.tests.fixtures import MarketplaceFixture
from waldur_mastermind.support.tests.base import override_support_settings
from waldur_openstack.openstack.tests import factories as openstack_factories
class ResourceGetTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
self.project = self.fixture.project
self.plan = factories.PlanFactory()
self.offering = self.plan.offering
self.resource = models.Resource.objects.create(
project=self.project, offering=self.offering, plan=self.plan,
)
def get_resource(self, user=None):
if not user:
user = self.fixture.owner
self.client.force_authenticate(user)
url = factories.ResourceFactory.get_url(self.resource)
return self.client.get(url)
def test_resource_is_usage_based(self):
factories.OfferingComponentFactory(
offering=self.offering,
billing_type=models.OfferingComponent.BillingTypes.USAGE,
)
self.assertTrue(self.get_resource().data['is_usage_based'])
def test_resource_is_not_usage_based(self):
self.assertFalse(self.get_resource().data['is_usage_based'])
def test_project_manager_can_get_resource_data(self):
response = self.get_resource(self.fixture.manager)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_customer_owner_can_get_resource_data(self):
response = self.get_resource(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_service_provider_can_get_resource_data(self):
owner = UserFactory()
self.offering.customer.add_user(owner, structure_models.CustomerRole.OWNER)
response = self.get_resource(owner)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_other_user_can_not_get_resource_data(self):
response = self.get_resource(UserFactory())
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_filter_resources_for_service_manager(self):
# Arrange
offering = factories.OfferingFactory(customer=self.fixture.customer)
offering.add_user(self.fixture.user)
resource = factories.ResourceFactory(project=self.project, offering=offering)
# Act
self.client.force_authenticate(self.fixture.owner)
url = factories.ResourceFactory.get_list_url()
response = self.client.get(
url, {'service_manager_uuid': self.fixture.user.uuid.hex}
)
# Assert
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['uuid'], resource.uuid.hex)
class ResourceSwitchPlanTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
self.project = self.fixture.project
self.plan1 = factories.PlanFactory()
self.offering = self.plan1.offering
self.plan2 = factories.PlanFactory(offering=self.offering)
self.resource1 = models.Resource.objects.create(
project=self.project,
offering=self.offering,
plan=self.plan1,
state=models.Resource.States.OK,
)
self.resource2 = models.Resource.objects.create(
project=self.project, offering=self.offering, plan=self.plan2,
)
def switch_plan(self, user, resource, plan):
self.client.force_authenticate(user)
url = factories.ResourceFactory.get_url(resource, 'switch_plan')
payload = {'plan': factories.PlanFactory.get_url(plan)}
return self.client.post(url, payload)
def test_plan_switch_is_available_if_plan_limit_is_not_reached(self):
# Arrange
self.plan2.max_amount = 10
self.plan2.save()
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_plan_switch_is_available_if_resource_is_terminated(self):
# Arrange
self.resource2.state = models.Resource.States.TERMINATED
self.resource2.save()
self.plan2.max_amount = 1
self.plan2.save()
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_plan_switch_is_not_available_if_plan_limit_has_been_reached(self):
# Arrange
self.plan2.max_amount = 1
self.plan2.save()
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_plan_switch_is_not_available_if_plan_is_related_to_another_offering(self):
# Act
response = self.switch_plan(
self.fixture.owner, self.resource1, factories.PlanFactory()
)
# Assert
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_plan_switch_is_not_available_if_resource_is_not_OK(self):
# Arrange
self.resource1.state = models.Resource.States.UPDATING
self.resource1.save()
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_order_item_is_created(self):
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertTrue(
models.OrderItem.objects.filter(
type=models.OrderItem.Types.UPDATE,
plan=self.plan2,
resource=self.resource1,
).exists()
)
def test_order_is_created(self):
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertTrue(
models.Order.objects.filter(
project=self.project, created_by=self.fixture.owner
).exists()
)
def test_order_is_approved_implicitly_for_authorized_user(self):
# Act
response = self.switch_plan(self.fixture.staff, self.resource1, self.plan2)
# Assert
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(order.state, models.Order.States.EXECUTING)
self.assertEqual(order.approved_by, self.fixture.staff)
def test_plan_switch_is_not_allowed_if_pending_order_item_for_resource_already_exists(
self,
):
# Arrange
factories.OrderItemFactory(
resource=self.resource1, state=models.OrderItem.States.PENDING
)
# Act
response = self.switch_plan(self.fixture.staff, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_plan_switching_is_not_available_for_blocked_organization(self):
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('waldur_mastermind.marketplace.tasks.process_order')
def test_order_has_been_approved_if_user_has_got_permissions(self, mock_task):
# Arrange
self.plan2.max_amount = 10
self.plan2.save()
# Act
response = self.switch_plan(self.fixture.owner, self.resource1, self.plan2)
# Assert
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_task.delay.assert_called_once_with(
'marketplace.order:%s' % order.id, 'core.user:%s' % self.fixture.owner.id
)
@mock.patch('waldur_mastermind.marketplace.views.tasks')
def test_order_has_not_been_approved_if_user_has_not_got_permissions(
self, mock_tasks
):
# Arrange
self.plan2.max_amount = 10
self.plan2.save()
# Act
response = self.switch_plan(self.fixture.admin, self.resource1, self.plan2)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_tasks.process_order.delay.assert_not_called()
@ddt
class ResourceTerminateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
self.project = self.fixture.project
self.plan = factories.PlanFactory()
self.offering = self.plan.offering
self.resource = models.Resource.objects.create(
project=self.project,
offering=self.offering,
plan=self.plan,
state=models.Resource.States.OK,
)
def terminate(self, user, attributes=None):
attributes = attributes or {}
self.client.force_authenticate(user)
url = factories.ResourceFactory.get_url(self.resource, 'terminate')
if attributes:
return self.client.post(url, {'attributes': attributes})
else:
return self.client.post(url)
@mock.patch('waldur_mastermind.marketplace.tasks.notify_order_approvers.delay')
def test_service_provider_can_terminate_resource(self, mocked_approve):
# Arrange
owner = UserFactory()
self.offering.customer.add_user(owner, structure_models.CustomerRole.OWNER)
# Act
response = self.terminate(owner)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
mocked_approve.assert_not_called()
def test_order_item_is_created_when_user_submits_termination_request(self):
# Act
response = self.terminate(self.fixture.owner)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(order.project, self.project)
@data(
models.Resource.States.CREATING,
models.Resource.States.UPDATING,
models.Resource.States.TERMINATING,
)
def test_termination_request_is_not_accepted_if_resource_is_not_ok_or_erred(
self, state
):
# Arrange
self.resource.state = state
self.resource.save()
# Act
response = self.terminate(self.fixture.owner)
# Assert
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
@data(models.Resource.States.OK, models.Resource.States.ERRED)
def test_termination_request_is_accepted_if_resource_is_ok_or_erred(self, state):
# Arrange
self.resource.state = state
self.resource.save()
# Act
response = self.terminate(self.fixture.owner)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_order_is_approved_implicitly_for_authorized_user(self):
# Act
response = self.terminate(self.fixture.staff)
# Assert
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(order.state, models.Order.States.EXECUTING)
self.assertEqual(order.approved_by, self.fixture.staff)
def test_plan_switch_is_not_allowed_if_pending_order_item_for_resource_already_exists(
self,
):
# Arrange
factories.OrderItemFactory(
resource=self.resource, state=models.OrderItem.States.PENDING
)
# Act
response = self.terminate(self.fixture.staff)
# Assert
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_resource_terminating_is_not_available_for_blocked_organization(self):
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.terminate(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_options_can_be_passed_if_resource_is_terminated(self):
# Act
response = self.terminate(self.fixture.staff, {'param': True})
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(order.project, self.project)
item = order.items.first()
self.assertTrue(item.attributes.get('param'))
def test_user_can_terminate_resource_if_project_has_been_soft_deleted(self):
self.project.is_removed = True
self.project.save()
response = self.terminate(self.fixture.staff)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class PlanUsageTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
self.project = self.fixture.project
self.plan1 = factories.PlanFactory()
self.offering = self.plan1.offering
self.plan2 = factories.PlanFactory(offering=self.offering)
factories.ResourceFactory.create_batch(
3,
project=self.project,
offering=self.offering,
plan=self.plan1,
state=models.Resource.States.OK,
)
factories.ResourceFactory.create_batch(
2,
project=self.project,
offering=self.offering,
plan=self.plan2,
state=models.Resource.States.OK,
)
factories.ResourceFactory.create_batch(
2,
project=self.project,
offering=self.offering,
plan=self.plan2,
state=models.Resource.States.TERMINATED,
)
def get_stats(self, data=None):
self.client.force_authenticate(self.fixture.owner)
url = factories.PlanFactory.get_list_url('usage_stats')
response = self.client.get(url, data)
return response
def test_count_plans_for_ok_resources(self):
response = self.get_stats()
self.assertEqual(response.data[0]['offering_uuid'], self.offering.uuid)
self.assertEqual(
response.data[0]['customer_provider_uuid'], self.offering.customer.uuid
)
self.assertEqual(response.data[0]['plan_uuid'], self.plan1.uuid)
self.assertEqual(response.data[0]['usage'], 3)
def test_count_plans_for_terminated_resources(self):
response = self.get_stats()
self.assertEqual(response.data[1]['usage'], 2)
def test_order_by_remaining_ascending(self):
self.plan1.max_amount = 100
self.plan1.save()
self.plan2.max_amount = 10
self.plan2.save()
response = self.get_stats({'o': 'remaining'})
data = response.data
self.assertEqual(data[0]['remaining'], 10 - 2)
self.assertEqual(data[1]['remaining'], 100 - 3)
def test_order_by_remaining_descending(self):
self.plan1.max_amount = 100
self.plan1.save()
self.plan2.max_amount = 10
self.plan2.save()
response = self.get_stats({'o': '-remaining'})
data = response.data
self.assertEqual(data[0]['remaining'], 100 - 3)
self.assertEqual(data[1]['remaining'], 10 - 2)
def test_filter_plans_by_offering_uuid(self):
plan = factories.PlanFactory()
factories.ResourceFactory.create_batch(
4,
project=self.project,
offering=plan.offering,
plan=plan,
state=models.Resource.States.OK,
)
response = self.get_stats({'offering_uuid': plan.offering.uuid.hex})
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['usage'], 4)
self.assertEqual(response.data[0]['offering_uuid'], plan.offering.uuid)
def test_filter_plans_by_customer_provider_uuid(self):
plan = factories.PlanFactory()
factories.ResourceFactory.create_batch(
4,
project=self.project,
offering=plan.offering,
plan=plan,
state=models.Resource.States.OK,
)
response = self.get_stats(
{'customer_provider_uuid': plan.offering.customer.uuid.hex}
)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['usage'], 4)
self.assertEqual(
response.data[0]['customer_provider_uuid'], plan.offering.customer.uuid
)
class ResourceCostEstimateTest(test.APITransactionTestCase):
@override_support_settings(
ENABLED=True,
ACTIVE_BACKEND='waldur_mastermind.support.backend.basic:BasicBackend',
)
def test_when_order_item_is_processed_cost_estimate_is_initialized(self):
# Arrange
fixture = fixtures.ProjectFixture()
offering = factories.OfferingFactory(type='Support.OfferingTemplate')
plan = factories.PlanFactory(unit_price=10)
order_item = factories.OrderItemFactory(
offering=offering,
plan=plan,
attributes={'name': 'item_name', 'description': 'Description'},
)
# Act
serialized_order = core_utils.serialize_instance(order_item.order)
serialized_user = core_utils.serialize_instance(fixture.staff)
tasks.process_order(serialized_order, serialized_user)
# Assert
order_item.refresh_from_db()
self.assertEqual(order_item.resource.cost, plan.unit_price)
def test_initialization_cost_is_added_to_cost_estimate_for_creation_request(self):
# Arrange
offering = factories.OfferingFactory(type='Support.OfferingTemplate')
one_time_offering_component = factories.OfferingComponentFactory(
offering=offering,
billing_type=models.OfferingComponent.BillingTypes.ONE_TIME,
type='signup',
)
usage_offering_component = factories.OfferingComponentFactory(
offering=offering,
billing_type=models.OfferingComponent.BillingTypes.USAGE,
type='cpu',
)
plan = factories.PlanFactory()
factories.PlanComponentFactory(
plan=plan, component=one_time_offering_component, price=100
)
factories.PlanComponentFactory(
plan=plan, component=usage_offering_component, price=10
)
order_item = factories.OrderItemFactory(offering=offering, plan=plan,)
order_item.init_cost()
self.assertEqual(order_item.cost, 100)
def test_when_plan_is_switched_cost_estimate_is_updated(self):
# Arrange
old_plan = factories.PlanFactory(unit_price=10)
new_plan = factories.PlanFactory(unit_price=100)
resource = factories.ResourceFactory(plan=old_plan)
factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
type=models.OrderItem.Types.UPDATE,
resource=resource,
plan=new_plan,
)
# Act
callbacks.resource_update_succeeded(resource)
resource.refresh_from_db()
# Assert
self.assertEqual(resource.cost, new_plan.unit_price)
def test_plan_switch_cost_is_added_to_cost_estimate_for_order_item(self):
# Arrange
offering = factories.OfferingFactory(type='Support.OfferingTemplate')
switch_offering_component = factories.OfferingComponentFactory(
offering=offering,
billing_type=models.OfferingComponent.BillingTypes.ON_PLAN_SWITCH,
type='plan_switch',
)
usage_offering_component = factories.OfferingComponentFactory(
offering=offering,
billing_type=models.OfferingComponent.BillingTypes.USAGE,
type='cpu',
)
plan = factories.PlanFactory()
factories.PlanComponentFactory(
plan=plan, component=switch_offering_component, price=50
)
factories.PlanComponentFactory(
plan=plan, component=usage_offering_component, price=10
)
order_item = factories.OrderItemFactory(
offering=offering, plan=plan, type=models.OrderItem.Types.UPDATE,
)
order_item.init_cost()
self.assertEqual(order_item.cost, 50)
@ddt
class ResourceNotificationTest(test.APITransactionTestCase):
@data(
'log_resource_creation_succeeded',
'log_resource_creation_failed',
'log_resource_update_succeeded',
'log_resource_update_failed',
'log_resource_terminate_succeeded',
'log_resource_terminate_failed',
)
@mock.patch('waldur_mastermind.marketplace.log.tasks')
def test_notify_about_resource_change(self, log_func_name, mock_tasks):
resource = factories.ResourceFactory()
log_func = getattr(log, log_func_name)
log_func(resource)
if log_func_name != 'log_resource_update_succeeded':
mock_tasks.notify_about_resource_change.delay.assert_called_once()
else:
mock_tasks.notify_about_resource_change.delay.assert_not_called()
class ResourceUpdateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = MarketplaceFixture()
self.resource = self.fixture.resource
self.url = factories.ResourceFactory.get_url(self.resource)
def make_request(self, user, payload=None):
self.client.force_authenticate(user)
payload = payload or {'name': 'new name', 'description': 'new description'}
return self.client.patch(self.url, payload)
def test_authorized_user_can_update_resource(self):
response = self.make_request(self.fixture.staff)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertEqual(self.resource.name, 'new name')
self.assertEqual(self.resource.description, 'new description')
def test_unauthorized_user_can_not_update_resource(self):
response = self.make_request(self.fixture.user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_renaming_of_resource_should_generate_audit_log(self):
old_name = self.resource.name
response = self.make_request(self.fixture.staff)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertTrue(
logging_models.Event.objects.filter(
message='Marketplace resource %s has been renamed. Old name: %s.'
% (self.resource.name, old_name)
).exists()
)
def test_authorized_user_can_update_end_date(self):
with freeze_time('2020-01-01'):
response = self.make_request(self.fixture.staff, {'end_date': '2021-01-01'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertTrue(self.resource.end_date)
def test_authorized_user_can_set_current_past_date(self):
with freeze_time('2020-01-01'):
response = self.make_request(self.fixture.staff, {'end_date': '2020-01-01'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertTrue(self.resource.end_date)
def test_user_cannot_set_past_date(self):
with freeze_time('2022-01-01'):
response = self.make_request(self.fixture.staff, {'end_date': '2020-01-01'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_end_date_should_generate_audit_log(self):
with freeze_time('2020-01-01'):
response = self.make_request(self.fixture.staff, {'end_date': '2021-01-01'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertTrue(
logging_models.Event.objects.filter(
message='End date of marketplace resource %s has been updated. End date: %s. User: %s.'
% (self.resource.name, self.resource.end_date, self.fixture.staff)
).exists()
)
class ResourceSetEndDateByProviderTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = MarketplaceFixture()
self.resource = self.fixture.resource
self.url = factories.ResourceFactory.get_url(
self.resource, 'set_end_date_by_provider'
)
def make_request(self, user, payload):
self.client.force_authenticate(user)
return self.client.post(self.url, payload)
@freeze_time('2020-01-01')
def test_resource_is_not_used_for_last_3_months_and_end_date_is_7_days_in_future(
self,
):
self.resource.state = models.Resource.States.OK
self.resource.save()
with freeze_time('2020-05-01'):
response = self.make_request(
self.fixture.offering_owner, {'end_date': '2020-05-08'}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertEqual(self.resource.end_date, parse_date('2020-05-08'))
@freeze_time('2020-01-01')
def test_resource_is_not_used_for_last_3_months_and_end_date_is_not_7_days_in_future(
self,
):
self.resource.state = models.Resource.States.OK
self.resource.save()
with freeze_time('2020-05-01'):
response = self.make_request(
self.fixture.offering_owner, {'end_date': '2020-05-05'}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@freeze_time('2020-01-01')
def test_resource_is_used_for_last_3_months_and_end_date_is_not_7_days_in_future(
self,
):
self.resource.state = models.Resource.States.OK
self.resource.save()
response = self.make_request(
self.fixture.offering_owner, {'end_date': '2020-01-05'}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@freeze_time('2020-01-01')
def test_resource_is_used_for_last_3_months_and_end_date_is_more_than_7_days_in_future(
self,
):
self.resource.state = models.Resource.States.OK
self.resource.save()
response = self.make_request(
self.fixture.offering_owner, {'end_date': '2020-01-10'}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ResourceUpdateLimitsTest(test.APITransactionTestCase):
def setUp(self):
plugins.manager.register(
offering_type='TEST_TYPE',
create_resource_processor=test_utils.TestCreateProcessor,
update_resource_processor=test_utils.TestUpdateScopedProcessor,
can_update_limits=True,
)
self.fixture = fixtures.ServiceFixture()
self.resource = factories.ResourceFactory()
self.resource.state = models.Resource.States.OK
self.resource.project.customer = self.fixture.customer
self.resource.project.save()
self.resource.limits = {'vcpu': 1}
self.resource.save()
self.resource.offering.type = 'TEST_TYPE'
self.resource.offering.save()
def update_limits(self, user, resource, limits=None):
limits = limits or {'vcpu': 10}
self.client.force_authenticate(user)
url = factories.ResourceFactory.get_url(resource, 'update_limits')
payload = {'limits': limits}
return self.client.post(url, payload)
def test_create_update_limits_order(self):
response = self.update_limits(self.fixture.owner, self.resource)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_limits_is_not_available_if_resource_is_not_OK(self):
# Arrange
self.resource.state = models.Resource.States.UPDATING
self.resource.save()
# Act
response = self.update_limits(self.fixture.owner, self.resource)
# Assert
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_order_item_is_created(self):
# Act
response = self.update_limits(self.fixture.owner, self.resource)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertTrue(
models.OrderItem.objects.filter(
type=models.OrderItem.Types.UPDATE, resource=self.resource,
).exists()
)
def test_order_is_created(self):
# Act
response = self.update_limits(self.fixture.owner, self.resource)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertTrue(
models.Order.objects.filter(
project=self.resource.project, created_by=self.fixture.owner
).exists()
)
def test_order_is_approved_implicitly_for_authorized_user(self):
# Act
response = self.update_limits(self.fixture.staff, self.resource)
# Assert
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(order.state, models.Order.States.EXECUTING)
self.assertEqual(order.approved_by, self.fixture.staff)
def test_update_limits_is_not_allowed_if_pending_order_item_for_resource_already_exists(
self,
):
# Arrange
factories.OrderItemFactory(
resource=self.resource, state=models.OrderItem.States.PENDING
)
# Act
response = self.update_limits(self.fixture.owner, self.resource)
# Assert
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_limits_is_not_available_for_blocked_organization(self):
customer = self.resource.project.customer
customer.blocked = True
customer.save()
response = self.update_limits(self.fixture.owner, self.resource)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('waldur_mastermind.marketplace.tasks.process_order')
def test_order_has_been_approved_if_user_has_got_permissions(self, mock_task):
# Act
response = self.update_limits(self.fixture.staff, self.resource)
# Assert
order = models.Order.objects.get(uuid=response.data['order_uuid'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_task.delay.assert_called_once_with(
'marketplace.order:%s' % order.id, 'core.user:%s' % self.fixture.staff.id
)
@mock.patch('waldur_mastermind.marketplace.views.tasks')
def test_order_has_not_been_approved_if_user_has_not_got_permissions(
self, mock_tasks
):
# Act
response = self.update_limits(self.fixture.owner, self.resource)
# Assert
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_tasks.process_order.delay.assert_not_called()
def test_update_limit_process(self):
response = self.update_limits(self.fixture.staff, self.resource)
self.assertEqual(response.status_code, status.HTTP_200_OK)
order_item = models.OrderItem.objects.get(
type=models.OrderItem.Types.UPDATE, resource=self.resource,
)
utils.process_order_item(order_item, self.fixture.staff)
self.resource.refresh_from_db()
self.assertEqual(self.resource.limits['vcpu'], 10)
class ResourceMoveTest(test.APITransactionTestCase):
def setUp(self):
self.tenant = openstack_factories.TenantFactory()
self.fixture = fixtures.ProjectFixture()
self.new_project = ProjectFactory()
self.project = self.fixture.project
self.resource = factories.ResourceFactory(project=self.project)
self.resource.scope = self.tenant
self.resource.save()
self.url = factories.ResourceFactory.get_url(
self.resource, action='move_resource'
)
def get_response(self, role):
self.client.force_authenticate(role)
payload = {'project': {'url': ProjectFactory.get_url(self.new_project)}}
return self.client.post(self.url, payload)
def test_move_resource_rest(self):
response = self.get_response(self.fixture.staff)
self.resource.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.resource.project, self.new_project)
def test_move_resource_is_not_possible_for_project_owner(self):
response = self.get_response(self.fixture.owner)
self.resource.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(self.resource.project, self.project)
def test_move_resource_is_not_possible_when_new_customer_is_blocked(self):
new_customer = self.new_project.customer
new_customer.blocked = True
new_customer.save()
response = self.get_response(self.fixture.staff)
self.resource.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(self.resource.project, self.project)
def test_move_resource_exception_handling(self):
start_invoice = invoices_factories.InvoiceFactory(
customer=self.project.customer,
year=2020,
month=1,
state=invoices_models.Invoice.States.PENDING,
)
invoices_factories.InvoiceItemFactory(
invoice=start_invoice, project=self.project, resource=self.resource,
)
invoices_factories.InvoiceFactory(
customer=self.new_project.customer,
year=2020,
month=1,
state=invoices_models.Invoice.States.CREATED,
)
response = self.get_response(self.fixture.staff)
self.resource.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(
response.json(),
{
'error_message': 'Resource moving is not possible, because invoice items moving is not possible.'
},
)
self.assertEqual(self.resource.project, self.project)
@ddt
class ResourceBackendIDTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.resource = factories.ResourceFactory(project=self.project)
self.url = factories.ResourceFactory.get_url(
self.resource, action='set_backend_id'
)
service_manager = UserFactory()
self.resource.offering.customer.add_user(
service_manager, role=structure_models.CustomerRole.SERVICE_MANAGER
)
setattr(self.fixture, 'service_manager', service_manager)
def make_request(self, role):
self.client.force_authenticate(role)
payload = {'backend_id': 'new_backend_id'}
return self.client.post(self.url, payload)
@data('staff', 'owner', 'service_manager')
def test_user_can_set_backend_id_of_resource(self, user):
response = self.make_request(getattr(self.fixture, user))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertEqual(self.resource.backend_id, 'new_backend_id')
def test_admin_can_not_set_backend_id_of_resource(self):
response = self.make_request(self.fixture.admin)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class ResourceReportTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.resource = factories.ResourceFactory(project=self.project)
self.url = factories.ResourceFactory.get_url(
self.resource, action='submit_report'
)
self.valid_report = [{'header': 'Section header', 'body': 'Section body'}]
service_manager = UserFactory()
self.resource.offering.customer.add_user(
service_manager, role=structure_models.CustomerRole.SERVICE_MANAGER
)
setattr(self.fixture, 'service_manager', service_manager)
service_owner = UserFactory()
self.resource.offering.customer.add_user(
service_owner, role=structure_models.CustomerRole.OWNER
)
setattr(self.fixture, 'service_owner', service_owner)
def make_request(self, role, payload):
self.client.force_authenticate(role)
return self.client.post(self.url, {'report': payload})
@data('staff', 'service_owner', 'service_manager')
def test_user_can_submit_report(self, user):
response = self.make_request(getattr(self.fixture, user), self.valid_report)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.resource.refresh_from_db()
self.assertEqual(self.resource.report, self.valid_report)
def test_admin_can_not_submit_report(self):
response = self.make_request(self.fixture.admin, self.valid_report)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_report_should_contain_at_least_one_section(self):
response = self.make_request(self.fixture.staff, [])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_report_section_should_contain_header_and_body(self):
response = self.make_request(self.fixture.staff, [1, 2])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ResourceDetailsTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.offering = factories.OfferingFactory(customer=self.fixture.customer)
self.offering.add_user(self.fixture.user)
self.resource = factories.ResourceFactory(
project=self.project, offering=self.offering
)
def make_request(self):
url = factories.ResourceFactory.get_url(self.resource, action='details')
self.client.force_authenticate(self.fixture.user)
return self.client.get(url)
def test_resource_without_scope_returns_error_404(self):
response = self.make_request()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_resource_with_scope_returns_valid_resource_details(self):
self.resource.scope = openstack_factories.TenantFactory(project=self.project)
self.resource.save()
response = self.make_request()
self.assertEqual(response.status_code, status.HTTP_200_OK)
@ddt
class ResourceSetStateTest(test.APITransactionTestCase):
def setUp(self) -> None:
self.fixture = fixtures.ProjectFixture()
self.offering = factories.OfferingFactory(customer=self.fixture.customer)
self.producer_owner = self.fixture.owner
self.consumer_fixture = fixtures.ProjectFixture()
self.consumer_owner = self.consumer_fixture.owner
self.consumer_project = self.consumer_fixture.project
self.resource = factories.ResourceFactory(
project=self.consumer_project, offering=self.offering
)
self.url = factories.ResourceFactory.get_url(
self.resource, action='set_state_by_provider'
)
@data('ok', 'terminated', 'erred')
def test_service_owner_can_change_resource_state(self, state):
self.client.force_authenticate(self.producer_owner)
states_dict = {
'ok': models.Resource.States.OK,
'erred': models.Resource.States.ERRED,
'terminated': models.Resource.States.TERMINATED,
}
response = self.client.post(self.url, {'state': state})
self.resource.refresh_from_db()
self.assertEqual(200, response.status_code)
self.assertEqual(states_dict[state], self.resource.state)
def test_consumer_can_not_change_resource_state(self):
self.client.force_authenticate(self.consumer_owner)
response = self.client.post(self.url, {'state': 'ok'})
self.assertEqual(403, response.status_code)
self.resource.refresh_from_db()
self.assertEqual(models.Resource.States.CREATING, self.resource.state)
class ResourceGetTeamTest(test.APITransactionTestCase):
def setUp(self) -> None:
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.offering = factories.OfferingFactory(customer=self.fixture.customer)
self.service_owner = self.fixture.owner
self.admin = self.fixture.admin
self.resource = factories.ResourceFactory(
project=self.project, offering=self.offering
)
self.url = factories.ResourceFactory.get_url(self.resource, action='team')
def test_service_owner_can_get_resource_team(self):
self.client.force_authenticate(self.service_owner)
response = self.client.get(self.url)
users = response.data
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(users))
user = users[0]
self.assertEqual(self.admin.full_name, user['full_name'])
def test_user_can_not_get_resource_team(self):
self.client.force_authenticate(self.admin)
response = self.client.get(self.url)
self.assertEqual(403, response.status_code)
|
|
# To fully test this module, we would need a copy of the stringprep tables.
# Since we don't have them, this test checks only a few code points.
import unittest
from test import support
from stringprep import *
class StringprepTests(unittest.TestCase):
def test(self):
self.assertTrue(in_table_a1("\u0221"))
self.assertFalse(in_table_a1("\u0222"))
self.assertTrue(in_table_b1("\u00ad"))
self.assertFalse(in_table_b1("\u00ae"))
self.assertEqual(map_table_b2("\u0041"), "\u0061")
self.assertEqual(map_table_b2("\u0061"), "\u0061")
self.assertEqual(map_table_b3("\u0041"), "\u0061")
self.assertEqual(map_table_b3("\u0061"), "\u0061")
self.assertTrue(in_table_c11("\u0020"))
self.assertFalse(in_table_c11("\u0021"))
self.assertTrue(in_table_c12("\u00a0"))
self.assertFalse(in_table_c12("\u00a1"))
self.assertTrue(in_table_c12("\u00a0"))
self.assertFalse(in_table_c12("\u00a1"))
self.assertTrue(in_table_c11_c12("\u00a0"))
self.assertFalse(in_table_c11_c12("\u00a1"))
self.assertTrue(in_table_c21("\u001f"))
self.assertFalse(in_table_c21("\u0020"))
self.assertTrue(in_table_c22("\u009f"))
self.assertFalse(in_table_c22("\u00a0"))
self.assertTrue(in_table_c21_c22("\u009f"))
self.assertFalse(in_table_c21_c22("\u00a0"))
self.assertTrue(in_table_c3("\ue000"))
self.assertFalse(in_table_c3("\uf900"))
self.assertTrue(in_table_c4("\uffff"))
self.assertFalse(in_table_c4("\u0000"))
self.assertTrue(in_table_c5("\ud800"))
self.assertFalse(in_table_c5("\ud7ff"))
self.assertTrue(in_table_c6("\ufff9"))
self.assertFalse(in_table_c6("\ufffe"))
self.assertTrue(in_table_c7("\u2ff0"))
self.assertFalse(in_table_c7("\u2ffc"))
self.assertTrue(in_table_c8("\u0340"))
self.assertFalse(in_table_c8("\u0342"))
# C.9 is not in the bmp
# self.assertTrue(in_table_c9(u"\U000E0001"))
# self.assertFalse(in_table_c8(u"\U000E0002"))
self.assertTrue(in_table_d1("\u05be"))
self.assertFalse(in_table_d1("\u05bf"))
self.assertTrue(in_table_d2("\u0041"))
self.assertFalse(in_table_d2("\u0040"))
# This would generate a hash of all predicates. However, running
# it is quite expensive, and only serves to detect changes in the
# unicode database. Instead, stringprep.py asserts the version of
# the database.
# import hashlib
# predicates = [k for k in dir(stringprep) if k.startswith("in_table")]
# predicates.sort()
# for p in predicates:
# f = getattr(stringprep, p)
# # Collect all BMP code points
# data = ["0"] * 0x10000
# for i in range(0x10000):
# if f(unichr(i)):
# data[i] = "1"
# data = "".join(data)
# h = hashlib.sha1()
# h.update(data)
# print p, h.hexdigest()
def test_main():
support.run_unittest(StringprepTests)
if __name__ == '__main__':
test_main()
|
|
""" Tree Objects """
import types
import re
import sys
import logging
from collections import deque
from baal.nlp.lexicon import get_entries
from copy import copy
from baal.utils.general import cformat, nonstr_join
from baal.utils.hlf import gensym, unboundgensym
from baal.utils.data_structures.nodes import *
# logging tips:
# http://stackoverflow.com/questions/13733552/logger-configuration-to-log-to-file-and-print-to-stdout
class enforcetype:
def __init__(self, *dargs):
self.enforce_args = dargs
def __call__(self, func):
def inner(*args, **kwargs):
if len(args[1:]) != len(self.enforce_args):
raise TypeError("Passed wrong number of parameters " +
"to either the decorator or the function")
for arg, darg in zip(args[1:], self.enforce_args):
if not isinstance(arg, darg):
raise TypeError("'%s' instance was type '%s'. "
"Class '%s' wants type '%s'" %
(arg, type(arg).__name__,
type(args[0]).__name__, darg.__name__))
return func(*args, **kwargs)
return inner
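# Illustrative use of the enforcetype decorator (a sketch, not part of the
# original module): the wrapped method raises TypeError when an argument's
# type does not match the declared one, as Frontier.__getitem__ does below.
#
#   class Registry(object):
#       @enforcetype(int)
#       def lookup(self, index):
#           return index * 2
#
#   Registry().lookup("3")   # raises TypeError: wrong argument type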
class Frontier(object):
def __init__(self, iterable, *args, **kwargs):
super(Frontier, self).__init__(*args, **kwargs)
self.items = deque(iterable)
self.span = [0, 0]
self.logger = logging.getLogger('trees')
self.initialize_span()
def initialize_span(self):
"""
Initialize the span.
span[0] should point at the most left lexical item
span[1] should point at the most right lexical item
Will break down if there is a gap between lexical items
Procedure:
The span marks the boundary edges of the lexical material.
First, we iterate over the non-lexical items to find the left boundary;
both span numbers will then equal the index of the first lexical item.
If that first lexical item is the last item
(span[0] + 1 == len(self.items)), exit.
Then, we move the right span number to point at the rightmost lexical unit.
"""
span = [0, 0]
for s_i, item in enumerate(self.items):
# find the left boundary
if not isinstance(item, LexicalNode):
span[0] += 1
span[1] += 1
else:
break
if span[0] + 1 == len(self.items):
# single lexical item case
self.span = span
return
for s_i, item in enumerate(list(self.items)[span[0]+1:]):
# find the right boundary
if not isinstance(item, LexicalNode):
break
span[1] += 1
self.span = span
def update_span(self):
""" Move the span to incorporate new lexical material """
while self.span[0] > 0 and \
isinstance(self.items[self.span[0]-1], LexicalNode):
self.span[0] -= 1
while self.span[1] < len(self.items)-1 and \
isinstance(self.items[self.span[1]+1], LexicalNode):
self.span[1] += 1
@property
def complete(self):
return self.span[0] == 0 and self.span[1] == len(self.items) - 1
@property
def nextitems(self):
"""
Return the items just beyond the lexical material.
The entry for a span boundary that has no material beyond it will be None.
"""
ret = []
if self.span[0] > 0:
ret.append(self.items[self.span[0]-1])
else:
ret.append(None)
if self.span[1] < len(self.items)-1:
ret.append(self.items[self.span[1]+1])
else:
ret.append(None)
return ret
@property
def currentitems(self):
""" Return the items at the span boundaries (should be lexical) """
return self.items[self.span[0]], self.items[self.span[1]]
def update(self, f_i, other_tree):
""" Updates the span for substitution operations """
if f_i: # the right case.
ind = self.span[1]+1
else:
ind = self.span[0]-1
address = self.items[ind].get_address()
self._insert(ind, other_tree.frontier.items,False)
self.update_span()
return tuple(address)
def spineiter(self, node):
""" used for insertions. move up the spine to see if it can insert """
while node.parent is not None:
yield node.parent
node = node.parent
def insert(self, subtree, direction):
insert_func = {"left":self.insertleft, "right":self.insertright}[direction]
insert_func(subtree)
def insertleft(self, subtree):
""" Leftward insertion: add items to left side """
self._insert(self.span[0], subtree.frontier.items, True)
self.update_span()
def insertright(self, subtree):
""" rightward insertion: add items to right side """
self._insert(self.span[1], subtree.frontier.items, True)
self.update_span()
def _insert(self, ind, iterable, include=True):
"""
Insert the new material in. Used as a helper function; don't call
directly.
Note: we don't keep the things that were at that spot if it's a
substitution.
"""
if not include:
end = ind+1
else:
end = ind
item_list = list(self.items)
self.items = deque(item_list[:ind]+list(iterable)+item_list[end:])
def copy(self,node_dict):
""" Making a __copy__ is too hard because I need a node dictionary """
self.logger.debug("\n Frontier's node dict")
for v in node_dict.values():
self.logger.debug(repr(v))
try:
new_frontier = [node_dict[hash(n)] for n in self.items]
except KeyError as e:
self.logger.debug(cformat("Culprit: %s" % str(n), "b2"))
self.logger.debug("\n")
self.logger.debug(cformat("nodedict", "f"))
for v in node_dict.values():
if v.symbol not in [x.symbol for x in self.items]:
continue
self.logger.debug(cformat(str(v),"1"))
self.logger.debug("Symbol: %s" % v.symbol)
self.logger.debug("Parent symbol: %s " % v.parent.symbol)
self.logger.debug("Children: %s" % v.children)
self.logger.debug("Type: %s" % v.node_type)
self.logger.debug("Parent Type: %s" % v.parent.node_type)
self.logger.debug("Hash: %s" % hash(v))
self.logger.debug("\n")
self.logger.debug(cformat("items","f"))
for v in self.items:
self.logger.debug(cformat(str(v),"1"))
self.logger.debug("Symbol: %s" % v.symbol)
self.logger.debug("Parent symbol: %s " % v.parent.symbol)
self.logger.debug("Children: %s" % v.children)
self.logger.debug("Type: %s" % v.node_type)
self.logger.debug("Parent Type: %s" % v.parent.node_type)
self.logger.debug("Hash: %s" % hash(v))
raise e
new_frontier = Frontier(new_frontier)
assert new_frontier.span == self.span, (
"Things have broken!",
"Possible thing: Frontiers!",
"Old frontier: %s" % self.items,
" New frontier: %s" % new_frontier.items,
"Other things",
"Old span: %s" % self.span,
"New span: %s" % new_frontier.span
)
return new_frontier
def __str__(self):
return "Frontier: %s" % ",".join([str(x) for x in self.items])
def __repr__(self):
return "Frontier: %s" % [repr(x) for x in self.items]
def __hash__(self):
"""
If the frontier is a list of nodes as defined below,
hash the tuple of symbols to avoid duplicates in charts;
else hash the tuple of items itself.
"""
if len(self.items) > 0 and isinstance(self.items[0], Node):
return hash(tuple(n.symbol for n in self.items))
return hash(tuple(self.items))
@enforcetype(int)
def __getitem__(self, k):
""" going to assume k is an integer """
return self.items[k]
@enforcetype(int, Node)
def __setitem__(self, k, v):
self.items[k] = v
def __iter__(self):
return iter(self.items)
def __contains__(self, k):
if len(self.items) > 0 and isinstance(self.items[0], Node):
return k in set(n.symbol for n in self.items)
return k in self.items
def __len__(self):
return len(self.items)
def __getslice__(self, *args):
raise NotImplementedError("Slices not supported in Frontiers")
def __setslice__(self, *args):
raise NotImplementedError("Slices not supported in Frontiers")
def __delslice__(self, *args):
raise NotImplementedError("Slices not supported in Frontiers")
def __missing__(self, key):
raise KeyError("There is no such thing here")
def from_string(in_str):
"""
modeled from NLTK's version in their class.
Assuming () as open and close patterns
e.g. (S (NP (NNP John)) (VP (V runs)))
TODO: we want parent to be insertion, foot to be foot. fix.
"""
tree_starting = lambda x: x[0] == "("
tree_ending = lambda x: x[0] == ")"
token_re = re.compile(r"\(\s*([^\s\(\)]+)?|\)|([^\s\(\)]+)")
stack = [(None, [])]
for match in token_re.finditer(in_str):
token = match.group()
# Case: tree/subtree starting. prepare structure
if tree_starting(token):
stack.append((token[1:].lstrip(), []))
# Case: tree/subtree is ending. make sure it's buttoned up
elif tree_ending(token):
label, children = stack.pop()
stack[-1][1].append(Node(symbol=label,
children=children,
parent=stack[-1][0]))
# Case: leaf node.
else:
stack[-1][1].append(token)
assert len(stack) == 1
assert len(stack[0][1]) == 1
assert stack[0][0] is None
resulting_tree = stack[0][1][0]
if isinstance(resulting_tree, list):
resulting_tree = resulting_tree[0]
assert isinstance(resulting_tree, Node)
return clean_tree(resulting_tree)
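# Illustrative usage of from_string (a sketch, not part of the original
# module): parsing the bracketed example from the docstring yields a cleaned
# Node tree whose leaves become LexicalNodes and whose frontier non-terminals
# become SubstitutionNodes (see clean_tree below).
#
#   root = from_string("(S (NP (NNP John)) (VP (V runs)))")
#   root.symbol   # "S"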
def clean_tree(root_node):
"""
We need to:
1. mark nodes as substitution or insertion.
2. find the head node.
3. convert string labels to symbol objects?
"""
is_insertion = False
insert_direction = ""
logger = logging.getLogger('trees')
head = ""
for c_i, child in root_node:
if isinstance(child, Node):
if len(child) > 0:
root_node[c_i] = child = clean_tree(child)
root_node.head = child.head
elif child.symbol[-1]=="*" and child.symbol[:-1] == root_node.symbol:
is_insertion = True
child.symbol = child.symbol[:-1]
assert c_i == 0 or c_i == len(root_node)-1, (
"root node at error: %s. " % root_node.symbol,
"Sometimes happens from badly formed bracket"
)
insert_direction = {0: "right",
(len(root_node)-1): "left"}[c_i]
root_node[c_i] = child = FootNode.from_node(child)
else:
root_node[c_i] = child = SubstitutionNode.from_node(child)
else:
# Found the head
root_node[c_i] = head = child = LexicalNode(symbol=child,
parent=root_node)
root_node.head = head
child.parent = root_node
try:
assert len(root_node.head.symbol) > 0, type(root_node)
except AttributeError as e:
logger.debug(root_node)
raise e
if is_insertion:
root_node = InsertionNode.from_node(root_node,
direction=insert_direction)
return root_node
# ////////////////////////////////////////////
# Tree and its descendants
# ////////////////////////////////////////////
class Tree(object):
"""
A generic tree object for tree grammars
based on http://www.nltk.org/_modules/nltk/tree.html
and
https://github.com/tomerfiliba/tau/blob/master/tag/tig.py#L270
Modified for Grounded Tree Grammars
Properties:
1. head --- Terminal node, represents core lexical item
2. structure ---
An initial tree is a spine plus offshoots at particular spots for
substitution;
an auxiliary tree is a spine plus an offshoot for the foot.
So how do we want to represent this?
We pick an initial tree. We then pick a second tree which substitutes.
The top-level structure should have a pointer to both trees,
but there should be a representation that says tree 2 substitutes
into tree 1.
I think a tree should have a finger on a root node.
It should also have a finger on its frontier nodes.
A node is an object.
Terminology from an amalgamation of places:
1. Elementary Trees, T_E, consist of
initial, T_I, and auxiliary, T_A, trees
ala original TAG
2. V_T is the set of Terminal Categories
ala (stone,2002)
3. V_NT is the set of Non-Terminal Categories
ala (stone,2002)
4. V_T, in natural language, are words or production units
5. All V in T_E are in V_T.union(V_NT)
6. In lexicalized trees, exactly 1 frontier node is in V_T
7. In lexicalized trees, the V_T node is the head
anchor in (stone,2002), head else
8. The SPINE is the path from the root to the head
ala (stone,2002).
Note: Schabes calls this a trunk, and reserves spine for the path
from the root to the foot
In general, I will be trying to keep Schabes' spine to ply-1, so
spine is used for root-head.
9. V_NT on the frontier in T_i are substitution nodes and usually
required (although, in theory, fragments can make sense
especially if the referent is pragmatically salient)
10. Substitution is one of the two operations allowed on trees
11. The other goes by a variety of names:
modification, adjunction, left/right-adjunction, insertion,
forward/backward sister adjunction
12. I will use the term "insertion"
13. I will restrict insertions as in (Shindo et al, 2011) & (stone,2002)
+ Insertions are left-right only.
+ No simultaneous left-right insertions.
+ Auxiliary trees will be kept 1-ply from root to foot
14. I will assume that, on input, any non-spine, non-lexicalized nodes
are marked for substitution
"""
def __init__(self, root_node, parent, head, frontier, derived=None):
"""
A tree will track:
its root node
all subtrees that have combined with it
            if it's a subtree, where it attaches
its frontier and the left-right indices of what it lexicalizes
in the input sentence
"""
self.logger = logging.getLogger("trees")
self.debug = False
self.root = root_node
self.head = head
self.frontier = frontier
self.parent = parent
        self.derived = derived if derived is not None else {}
self.hlf_form = None
self.terms = None
self.yielded = None
self.logger.debug("\n------------------------\n")
self.logger.debug("Node with root %s and head %s has been created" % (self.root.symbol, self.head))
self.logger.debug("\t%s" % self.frontier)
self.logger.debug("\t Span(%s,%s)" % tuple(self.frontier.span))
self.logger.debug("\t Parent: %s" % self.parent)
self.logger.debug("\t Self type: %s" % self.root.node_type)
self.logger.debug("\n------------------------\n")
@property
def next_frontier(self):
return self.frontier.nextitems
@property
def current_frontier(self):
return self.frontier.currentitems
@classmethod
def instantiate(cls, root=None, parent=None, i=-1,
bracketed_string=None, lexical_item=None):
""" Instantiation check order: bracketed_string, lexical_item, root.
so leave bracketed_string and lexical_item empty if root.
lexical_item will only work if you have a lexicon
and even then, it expects the lexicon to yield bracketed strings
TODO Note:
I imagine that one day I will have generic forms for
parts of speech. For instance, a PP has attachment options
and a PP can modify different properties of the thing it
attaches to. So, in the case that I don't know what is
going on, or when I don't have the right tree for a word
I instantiate all possible forms, and see if I can use that
to infer what is going on.
But, this also requires a bit of ground truth.
So, for example, for INCA, I will have built in a
"I don't get it. program me" with a more rigid syntax
to select building blocks and connections.
Thus, INCA can incrementally learn which things refer
to other things.
What would be the shortcut to this?
            We combinatorially sample from our function set
we ask people on mechanical turk to generate descriptions
we feed that into our parsing model and attempt to
build parse trees. we verify on another set of people.
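        A minimal usage sketch (bracketed_string is checked first, so the
        other arguments can be omitted; the lexical_item form assumes a
        lexicon whose get_entries() yields bracketed strings):
            np_tree = Tree.instantiate(bracketed_string="(NP Chris)")
            # or, with a lexicon available (hypothetical lexical entry):
            # verb_tree = Tree.instantiate(lexical_item="loves")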
"""
if not root and not lexical_item and not bracketed_string:
raise TypeError("tree.instantiate takes either a root node"
+ " or a lexical item to make into a root node"
+ " or a bracketed string structure to convert")
if bracketed_string:
root = from_string(bracketed_string)
elif lexical_item:
root = from_string(get_entries(lexical_item))
head,frontier = root.initialize_frontier()
return cls(root,parent,head,Frontier(frontier))
@property
def complete(self):
return self.frontier.complete
def __copy__(self):
"""
Extend the copy operation to include deep copies of nodes
Note: we only want to deep copy nodes because of child structure
we want to maintain as minimal repeated objects as possible
so, in derived, since we aren't extending it, we only copy
and the copy will return back references to the objects found within
"""
newroot, node_dict = type(self.root).clone(self.root)
return Tree(newroot, self.parent, newroot.head, self.frontier.copy(node_dict),
copy(self.derived))
def __str__(self):
return self.__repr__()
def __repr__(self):
addressbook = sorted(self.root.get_addressbook().items())
# Debugging
for address,node in addressbook:
self.logger.debug(cformat(str(address),'wu'))
self.logger.debug(cformat("%s :: %s" % (node.symbol, node.ops), "b"))
if not self.yielded:
self._format_derived(addressbook)
# Debugging
for address,node in addressbook:
self.logger.debug(cformat(str(address),'wu'))
self.logger.debug(cformat("%s :: %s" % (node.symbol, node.ops), "b"))
border = cformat("\n{:_^30}\n".format(""),"fu")
d_fun = lambda (l,x): ("%s" % (l,))
if len(self.derived) > 0:
derived_str = "\n\t".join(map(d_fun, self.derived.items()))
st_str = cformat("\nSubtree root labels:","wu") + \
"\n\t%s" % derived_str
else:
st_str = ""
return border+cformat("\nDerivation Tree:","wu")+"%s" % self.root + \
"%s" % st_str + \
cformat("\nYield:", "wu")+" %s" % self.yielded + \
cformat("\nHLF Form:", "wu")+" %s" % self.hlf_form + border
def _format_derived(self, addressbook=None):
""" Purpose: use the addressbook to make the derived tree. """
if addressbook is None:
addressbook = sorted(self.root.get_addressbook().items())
form_addr = lambda addr: ",".join(map(str,addr[1:])) if addr[0] == -1 \
else ",".join(map(str,addr))
headbook = {}
for address,node in addressbook:
address_str = form_addr(address)
# node.address = list(address)
if node.head is not None:
headbook[node] = gensym(node.head.symbol,form_addr(node.head.get_address()))
elif isinstance(node, LexicalNode):
headbook[node] = gensym(node.symbol, form_addr(node.get_address()))
else:
headbook[node] = unboundgensym(address_str)
parent_node_address,parent = addressbook[0]
head = headbook[parent]
#addressbook = addressbook[1:]
last_address, last_node = addressbook[0]
terms = {head:[head]}
# a major assumption: when we enter children, it's the first time we
# see the parent node. this dictates how we treat heads
# i.e. a head's address is when we see it for the first time
# Enter the set of children of a node
enter_child_cond = lambda addr, last_addr: len(addr) > len(last_addr)
# exit the set of children of a node
exit_child_cond = lambda addr, last_addr: len(addr) < len(last_addr)
stack = deque()
yielder = []
derived = {}
for address,node in addressbook:
address_str = ",".join(map(str,address[1:]))
if isinstance(node,LexicalNode):
yielder.append(node.symbol)
if enter_child_cond(address, last_address):
self.logger.debug("pushing %s" % parent)
stack.append(parent)
parent, head = last_node, headbook[last_node]
terms.setdefault(head,[head])
elif exit_child_cond(address, last_address):
try:
parent = stack.pop()
head = headbook[parent]
self.logger.debug("popping %s" % parent)
except IndexError as e:
self.logger.debug(address, node)
self.logger.debug(len(address), len(last_address))
self.logger.debug(last_node)
for address in addressbook:
self.logger.debug(address)
raise e
else:
try:
assert len(stack) == len(address) - 2, \
"Stack: %s, Address: %s" % (stack, address)
                except AssertionError:
self.logger.debug(cformat('THE STACK IS NOT CORRECT... =(', 'f'))
self.logger.debug(cformat('Stack: %s, address: %s' % (stack, address),'b'))
headbook_str = "\n".join([str(x) for x in headbook.items()])
self.logger.debug(cformat('Also, check the headbook: %s' % headbook_str, 'b'))
if len(node.ops) == 0 and node.node_type == "substitution":
assert not node.complete
terms[head] = terms.setdefault(head,[head]) + \
[unboundgensym(address_str)]
# Hobbsian Logical Form calculations
for opname,ophead in node.ops:
if opname == "substitution":
# we assume a subbed symbol is an argument for the head
# word but also that it has a function itself
key = "sub(%s)@%s" % (ophead.symbol, address_str)
# reminder about update sym:
# It both adds and returns the symbol
# Call help(dict().setdefault) in terminal
ophead = headbook[ophead]
sub_syms = terms.setdefault(ophead,[ophead])
sub_sym = sub_syms[0]
terms[head].append(sub_sym)
elif opname.split("-")[0] == "insertion":
# Insertions are functions on the head word
opname,direction = opname.split("-")
headsym = "%s->%s" % (ophead.symbol,node.symbol) \
if direction == "left" \
else "%s<-%s" % (node.symbol,ophead.symbol)
key = "ins(%s)@%s" % (headsym,address_str)
ophead = headbook[ophead]
terms[ophead] = terms.setdefault(ophead,[ophead]) + \
[headbook[node]]
if key in derived.keys():
key = key+"x"
self.logger.warning("Duplicate derived keys")
derived[key] = node
self.logger.debug('end deriv. loop. node: %s, address: %s' %
(node,address))
last_node = node
last_address = address
self.logger.debug("Total: %s" % len(addressbook))
hlf_make = lambda func,variables: "%s(%s)" % \
(func.headword, nonstr_join(variables, ','))
hlf_expr = " & ".join([hlf_make(f,vs) for f,vs in terms.items()])
self.derived = derived
self.yielded = " ".join(yielder)
self.hlf_form = hlf_expr
self.terms = terms
def get_head(self):
return self.head
def _compatible_subst(self, node, other_tree):
"""
Test for compatibility
Other things to add:
feature unification
semantic unification
contextual unification (just semantics?)
Going to pass in the contextual span.
"""
if not node:
return False
same_type = node.symbol == other_tree.root.symbol
is_subst_site = isinstance(node, SubstitutionNode)
return same_type and is_subst_site
def _compatible_insert(self, node, other_tree):
"""
Same things as in subst but with different semantics probably
"""
if not node:
return False
same_type = node.symbol == other_tree.root.symbol
return same_type
def update_derived(self):
new_derived = {}
for key,value in self.derived.items():
new_derived[tuple(value.root.get_address())] = value
self.derived = new_derived
def get_node_by_address(self, address):
if address[0] == -1:
address = address[1:]
return self._get_node_by_address(self.root, address)
def _get_node_by_address(self, node, address):
        if len(address) == 0:
            return node
        elif len(address) == 1:
return node.children[address[0]]
else:
try:
return self._get_node_by_address(node.children[address[0]],address[1:])
except IndexError as e:
print node.children
print address
raise e
def combine(self, other_tree, edge_conditionals=(True, True)):
""" We assume the other tree is the inserter or substituter """
self.logger.debug("Inside the combine.")
self.logger.debug("My symbol: %s" % self.root.symbol)
self.logger.debug("My frontier with span %s: %s" % (self.frontier.span,
repr(self.frontier)))
self.logger.debug("Other symbol: %s" % other_tree.root.symbol)
if isinstance(other_tree.root, InsertionNode):
self.logger.debug("Found an insertion node")
left, right = zip(self.current_frontier, edge_conditionals)
direction = other_tree.root.direction
spine_leaf,edge_bool = right if direction == "right" else left
for frontier_spine in self.frontier.spineiter(spine_leaf):
if self._compatible_insert(frontier_spine, other_tree) and \
edge_bool:
newtree = copy(self)
new_other_tree = copy(other_tree)
insert_address = frontier_spine.get_address()
insertee_node = newtree.get_node_by_address(insert_address)
tree_index = insertee_node.insert_into(direction,
new_other_tree.root)
assert tuple(insert_address) == tree_index
newtree.frontier.insert(new_other_tree, direction)
newtree._format_derived()
yield newtree
elif isinstance(other_tree.root, Node):
left, right = self.next_frontier
self.logger.debug("In the substitution condition")
# a note about limiting subs
# This basically allows us to restrict which substitution site will
# get looked at. This is useful when the lex material is
# surrounded by the same substitution site symbol (e.g. two NP)
# the incoming other tree is only compatible with one of them
# as determined by the edge indices. But we don't want to
# complicate the logic here with edge indices, so we just pass
# in the verdict on whether an edge substitution can happen or not
limiting_subs = zip([left,right],edge_conditionals)
for f_i,(frontier_item,edge_bool) in enumerate(limiting_subs):
if self._compatible_subst(frontier_item, other_tree) \
and edge_bool:
self.logger.debug("self: %s substituting %s in" %
(repr(self.root),repr(other_tree.root)))
self.logger.debug("Complete other tree: %s" % other_tree)
newtree = copy(self)
new_other_tree = copy(other_tree)
self.logger.debug("Substituting into %s" % newtree.next_frontier[f_i])
newtree.next_frontier[f_i].substitute_into(new_other_tree.root)
new_other_tree.root = newtree.next_frontier[f_i]
tree_index = newtree.frontier.update(f_i,new_other_tree)
new_other_tree.parent = newtree
newtree._format_derived()
yield newtree
def tests(debug=False):
"""
testing procedure:
make trees.
combine them.
print output.
"""
frombstr = lambda x: Tree.instantiate(bracketed_string=x)
debug_level = logging.DEBUG if debug else logging.WARNING
logging.basicConfig(level=debug_level, stream=sys.stdout)
base_string = "(S (NP) (VP (V loves) (NP)))"
base_tree = frombstr(base_string)
comp_strings = ["(NP Chris)",
"(NP Sandy)"]
comptrees = [frombstr(x) for x in comp_strings]
insert_strings = ["(VP (ADVP madly) (VP*))",
"(VP (VP*) (ADVP madly))"]
inserttrees = [frombstr(x) for x in insert_strings]
insertion_tests(*([base_tree]+inserttrees))
composition_test(*([base_tree]+comptrees))
print "\nMaking 'Chris loves Sandy madly'\n"
for newt1 in base_tree.combine(comptrees[1]):
for newt2 in newt1.combine(comptrees[0]):
print newt2
for newt3 in newt2.combine(inserttrees[1]):
print "Result: %s" % newt3
def insertion_tests(base_tree, inserttree1,inserttree2):
print "Base tree: %s \n" % base_tree
print "Insertion"
for newt in base_tree.combine(inserttree1):
for newt2 in newt.combine(inserttree2):
print "Result of first combine: %s" % newt
print "Result of second combine: %s" % newt2
def composition_test(base_tree, comptree1, comptree2):
print "Base tree: %s \n" % base_tree
print "Composition"
for newt in base_tree.combine(comptree1):
for newt2 in newt.combine(comptree2):
print "Result of first combine: %s" % newt
print "Result of second combine: %s" % newt2
# print "Post composition, base tree unchanged %s" % base_tree
# print "\n---\n"
# print "Composition 2"
# for newt in test_trees[1].combine(test_trees[2]):
# print newt
# print "Post Comp"
# print test_trees[1]
if __name__ == "__main__":
print "Running tests"
tests()
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume Backups are full copies of persistent volumes stored in a backup
store e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_topic: What :mod:`rpc` topic to listen to (default:
`cinder-backup`).
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
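A minimal, illustrative cinder.conf snippet (the option names match the
``backup_manager_opts`` registered in this module; the values shown are
examples only)::

    [DEFAULT]
    backup_driver = cinder.backup.drivers.swift
    backup_service_inithost_offload = True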
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder.backup import driver
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import rpc
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift',
help='Driver to use for backups.',),
cfg.BoolOpt('backup_service_inithost_offload',
default=False,
help='Offload pending backup delete during '
'backup service startup.',),
]
# This map doesn't need to be extended in the future since it's only
# for old backup services
mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',
'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'}
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver')
CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver')
QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = '1.2'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
return self._map_service_to_driver(CONF.backup_driver)
def _map_service_to_driver(self, service):
"""Maps services to drivers."""
if service in mapper:
return mapper[service]
return service
def _update_backup_error(self, backup, context, err):
backup.status = fields.BackupStatus.ERROR
backup.fail_reason = err
backup.save()
def init_host(self):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
try:
self._cleanup_incomplete_backup_operations(ctxt)
except Exception:
# Don't block startup of the backup service.
LOG.exception(_LE("Problem cleaning incomplete backup "
"operations."))
def reset(self):
super(BackupManager, self).reset()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
def _cleanup_incomplete_backup_operations(self, ctxt):
LOG.info(_LI("Cleaning up incomplete backup operations."))
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
try:
self._cleanup_one_backup(ctxt, backup)
except Exception:
LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
{'bkup': backup['id']})
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
{'bkup': backup['id']})
def _cleanup_one_volume(self, ctxt, volume):
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, volume)
LOG.info(_LI('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).'),
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, volume)
LOG.info(_LI('setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
def _cleanup_one_backup(self, ctxt, backup):
if backup['status'] == fields.BackupStatus.CREATING:
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
self._cleanup_one_volume(ctxt, volume)
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, ctxt, err)
elif backup['status'] == fields.BackupStatus.RESTORING:
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
self._cleanup_one_volume(ctxt, volume)
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
elif backup['status'] == fields.BackupStatus.DELETING:
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
if CONF.backup_service_inithost_offload:
# Offload all the pending backup delete operations to the
# threadpool to prevent the main backup service thread
# from being blocked.
self._add_to_threadpool(self.delete_backup, ctxt, backup)
else:
# By default, delete backups sequentially
self.delete_backup(ctxt, backup)
def _detach_all_attachments(self, ctxt, volume):
attachments = volume['volume_attachment'] or []
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
try:
rpcapi = self.volume_rpcapi
rpcapi.detach_volume(ctxt, volume, attachment['id'])
except Exception:
LOG.exception(_LE("Detach attachment %(attach_id)s"
" failed."),
{'attach_id': attachment['id']},
resource=volume)
def _delete_temp_volume(self, ctxt, backup):
try:
temp_volume = objects.Volume.get_by_id(
ctxt, backup.temp_volume_id)
self.volume_rpcapi.delete_volume(ctxt, temp_volume)
except exception.VolumeNotFound:
LOG.debug("Could not find temp volume %(vol)s to clean up "
"for backup %(backup)s.",
{'vol': backup.temp_volume_id,
'backup': backup.id})
backup.temp_volume_id = None
backup.save()
def _delete_temp_snapshot(self, ctxt, backup):
try:
temp_snapshot = objects.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
volume = objects.Volume.get_by_id(
ctxt, backup.volume_id)
            # The temp snapshot should be deleted directly through the
            # volume driver, not through the volume manager.
self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot,
volume.host)
except exception.SnapshotNotFound:
LOG.debug("Could not find temp snapshot %(snap)s to clean "
"up for backup %(backup)s.",
{'snap': backup.temp_snapshot_id,
'backup': backup.id})
backup.temp_snapshot_id = None
backup.save()
def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
        # that are not deleted. Make sure any temporary volumes or snapshots
        # created by the backup job are deleted when the service is started.
if (backup.temp_volume_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_volume(ctxt, backup)
if (backup.temp_snapshot_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_snapshot(ctxt, backup)
def _cleanup_temp_volumes_snapshots_when_backup_created(
self, ctxt, backup):
# Delete temp volumes or snapshots when backup creation is completed.
if backup.temp_volume_id:
self._delete_temp_volume(ctxt, backup)
if backup.temp_snapshot_id:
self._delete_temp_snapshot(ctxt, backup)
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
volume = objects.Volume.get_by_id(context, volume_id)
previous_status = volume.get('previous_status', None)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
backup.host = self.host
backup.service = self.driver_name
backup.availability_zone = self.az
backup.save()
expected_status = 'backing-up'
actual_status = volume['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.CREATING
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
backup.save()
raise exception.InvalidBackup(reason=err)
try:
self._run_backup(context, backup, volume)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
self._update_backup_error(backup, context, six.text_type(err))
# Restore the original status.
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'backing-up'})
backup.status = fields.BackupStatus.AVAILABLE
backup.size = volume['size']
backup.save()
# Handle the num_dependent_backups of parent backup when child backup
# has created successfully.
if backup.parent_id:
parent_backup = objects.Backup.get_by_id(context,
backup.parent_id)
parent_backup.num_dependent_backups += 1
parent_backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def _run_backup(self, context, backup, volume):
backup_service = self.service.get_backup_driver(context)
properties = utils.brick_get_connector_properties()
backup_dic = self.volume_rpcapi.get_backup_device(context,
backup, volume)
try:
backup_device = backup_dic.get('backup_device')
is_snapshot = backup_dic.get('is_snapshot')
attach_info = self._attach_device(context, backup_device,
properties, is_snapshot)
try:
device_path = attach_info['device']['path']
if isinstance(device_path, six.string_types):
if backup_dic.get('secure_enabled', False):
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
else:
with utils.temporary_chown(device_path):
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
else:
backup_service.backup(backup, device_path)
finally:
self._detach_device(context, attach_info,
backup_device, properties,
is_snapshot)
finally:
backup = objects.Backup.get_by_id(context, backup.id)
self._cleanup_temp_volumes_snapshots_when_backup_created(
context, backup)
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
volume = objects.Volume.get_by_id(context, volume_id)
self._notify_about_backup_usage(context, backup, "restore.start")
backup.host = self.host
backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.RESTORING
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
backup_service = self._map_service_to_driver(backup['service'])
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
self._run_restore(context, backup, volume)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def _run_restore(self, context, backup, volume):
backup_service = self.service.get_backup_driver(context)
properties = utils.brick_get_connector_properties()
secure_enabled = (
self.volume_rpcapi.secure_file_operations_enabled(context,
volume))
attach_info = self._attach_device(context, volume, properties)
try:
device_path = attach_info['device']['path']
if isinstance(device_path, six.string_types):
if secure_enabled:
with open(device_path, 'wb') as device_file:
backup_service.restore(backup, volume.id, device_file)
else:
with utils.temporary_chown(device_path):
with open(device_path, 'wb') as device_file:
backup_service.restore(backup, volume.id,
device_file)
else:
backup_service.restore(backup, volume.id, device_path)
finally:
self._detach_device(context, attach_info, volume, properties)
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.start")
backup.host = self.host
backup.save()
expected_status = fields.BackupStatus.DELETING
actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
backup.destroy()
# If this backup is incremental backup, handle the
# num_dependent_backups of parent backup
if backup.parent_id:
parent_backup = objects.Backup.get_by_id(context,
backup.parent_id)
if parent_backup.has_dependent_backups:
parent_backup.num_dependent_backups -= 1
parent_backup.save()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup: backup object to export
        :returns: backup_record - a description of how to import the backup,
                  containing 'backup_url' (how to import the backup) and
                  'backup_service' (the driver needed for the import).
:raises: InvalidBackup
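        An illustrative shape of the returned record (values are examples;
        the URL is an opaque string produced by backup.encode_record())::

            {'backup_service': 'cinder.backup.drivers.swift',
             'backup_url': '<encoded backup metadata>'}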
"""
LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = fields.BackupStatus.AVAILABLE
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {}
backup_record['backup_service'] = backup.service
backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') %
{'configured_service': configured_service,
'backup_service': backup_service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
backup_service = self.service.get_backup_driver(context)
driver_info = backup_service.export_record(backup)
backup_url = backup.encode_record(driver_info=driver_info)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
def import_record(self,
context,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
:raises: InvalidBackup
:raises: ServiceNotFound
"""
LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
# No, are there additional potential backup hosts in the list?
if len(backup_hosts) > 0:
                # try the next host on the list, maybe it can import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
backup_hosts)
else:
# empty list - we are the last host on the list, fail
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
try:
# Deserialize backup record information
backup_options = backup.decode_record(backup_url)
# Extract driver specific info and pass it to the driver
driver_options = backup_options.pop('driver_info', {})
backup_service = self.service.get_backup_driver(context)
backup_service.import_record(backup, driver_options)
except Exception as err:
msg = six.text_type(err)
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = {
'display_name',
'display_description',
'container',
'size',
'service_metadata',
'service',
'object_count',
'id'
}
# Check for missing fields in imported data
missing_opts = required_import_options - set(backup_options)
if missing_opts:
msg = (_('Driver successfully decoded imported backup data, '
'but there are missing fields (%s).') %
', '.join(missing_opts))
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
# Confirm the ID from the record in the DB is the right one
backup_id = backup_options['id']
if backup_id != backup.id:
msg = (_('Trying to import backup metadata from id %(meta_id)s'
' into backup %(id)s.') %
{'meta_id': backup_id, 'id': backup.id})
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
# Overwrite some fields
backup_options['status'] = fields.BackupStatus.AVAILABLE
backup_options['service'] = self.driver_name
backup_options['availability_zone'] = self.az
backup_options['host'] = self.host
# Remove some values which are not actual fields and some that
# were set by the API node
for key in ('name', 'user_id', 'project_id'):
backup_options.pop(key, None)
# Update the database
backup.update(backup_options)
backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
'finished.'), backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup.id,
'status': status})
backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': configured_service,
'backup_service': backup_service}
raise exception.InvalidBackup(reason=err)
# Verify backup
try:
# check whether the backup is ok or not
if (status == fields.BackupStatus.AVAILABLE
and backup['status'] != fields.BackupStatus.RESTORING):
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
backup.status = status
backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == fields.BackupStatus.ERROR or
(status == fields.BackupStatus.AVAILABLE and
backup.status == fields.BackupStatus.RESTORING)):
backup.status = status
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '
'does not support verify. Backup id '
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# Needs to clean temporary volumes and snapshots.
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(
context, backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
{'bkup': backup.id})
# send notification to ceilometer
notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service.get_backup_driver(context)
return backup_service.support_force_delete
def _attach_device(self, context, backup_device,
properties, is_snapshot=False):
"""Attach backup device."""
if not is_snapshot:
return self._attach_volume(context, backup_device, properties)
else:
msg = _("Can't attach snapshot.")
raise NotImplementedError(msg)
def _attach_volume(self, context, volume, properties):
"""Attach a volume."""
try:
conn = self.volume_rpcapi.initialize_connection(context,
volume,
properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.volume_rpcapi.terminate_connection(context, volume,
properties,
force=True)
except Exception:
LOG.warning(_LW("Failed to terminate the connection "
"of volume %(volume_id)s, but it is "
"acceptable."),
                                {'volume_id': volume.id})
def _connect_device(self, conn):
"""Establish connection to device."""
use_multipath = CONF.use_multipath_for_image_xfer
device_scan_attempts = CONF.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _detach_device(self, context, attach_info, volume,
properties, is_snapshot=False, force=False):
"""Disconnect the volume from the host. """
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
rpcapi = self.volume_rpcapi
rpcapi.terminate_connection(context, volume, properties, force=force)
rpcapi.remove_export(context, volume)
|
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import uuid
import yaml
import sys
from mock import call, patch, MagicMock
from test_utils import CharmTestCase
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
sys.modules['apt'] = MagicMock()
os.environ['JUJU_UNIT_NAME'] = 'keystone'
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = 'keystone'
import keystone_utils as utils
_reg = utils.register_configs
_map = utils.restart_map
utils.register_configs = MagicMock()
utils.restart_map = MagicMock()
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs: f(*args, **kwargs))
with patch('keystone_utils.run_in_apache') as mock_run_in_apache:
import keystone_hooks as hooks
from charmhelpers.contrib import unison
utils.register_configs = _reg
utils.restart_map = _map
TO_PATCH = [
# charmhelpers.core.hookenv
'Hooks',
'config',
'is_relation_made',
'log',
'local_unit',
'filter_installed_packages',
'relation_ids',
'relation_set',
'relation_get',
'related_units',
'unit_get',
'peer_echo',
'network_get_primary_address',
'open_port',
'is_leader',
# charmhelpers.core.host
'apt_install',
'apt_update',
'service_restart',
# charmhelpers.contrib.openstack.utils
'configure_installation_source',
'git_install_requested',
# charmhelpers.contrib.openstack.ip
'resolve_address',
# charmhelpers.contrib.openstack.ha.utils
'update_dns_ha_resource_params',
'expect_ha',
# charmhelpers.contrib.hahelpers.cluster_utils
'is_elected_leader',
'get_hacluster_config',
'is_clustered',
# keystone_utils
'restart_map',
'register_configs',
'do_openstack_upgrade_reexec',
'openstack_upgrade_available',
'save_script_rc',
'migrate_database',
'ensure_initial_admin',
'add_service_to_keystone',
'synchronize_ca_if_changed',
'update_nrpe_config',
'ensure_ssl_dirs',
'is_db_ready',
'keystone_service',
'create_or_show_domain',
'get_api_version',
# other
'check_call',
'execd_preinstall',
'mkdir',
'os',
# ip
'get_iface_for_address',
'get_netmask_for_address',
'get_address_in_network',
'git_install',
'is_service_present',
'delete_service_entry',
'os_release',
'service_pause',
'disable_unused_apache_sites',
'run_in_apache',
# unitdata
'unitdata',
]
class KeystoneRelationTests(CharmTestCase):
def setUp(self):
super(KeystoneRelationTests, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get
self.ssh_user = 'juju_keystone'
@patch.object(utils, 'os_release')
@patch.object(utils, 'git_install_requested')
@patch.object(unison, 'ensure_user')
def test_install_hook(self, ensure_user, git_requested, os_release):
git_requested.return_value = False
self.run_in_apache.return_value = False
repo = 'cloud:precise-grizzly'
self.test_config.set('openstack-origin', repo)
hooks.install()
self.assertTrue(self.execd_preinstall.called)
self.configure_installation_source.assert_called_with(repo)
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(
['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
'python-six', 'unison', 'uuid'], fatal=True)
self.git_install.assert_called_with(None)
self.disable_unused_apache_sites.assert_not_called()
@patch.object(utils, 'os_release')
@patch.object(utils, 'git_install_requested')
@patch.object(unison, 'ensure_user')
def test_install_hook_apache2(self, ensure_user,
git_requested, os_release):
git_requested.return_value = False
self.run_in_apache.return_value = True
repo = 'cloud:xenial-newton'
self.test_config.set('openstack-origin', repo)
self.os.path.exists.return_value = True
hooks.install()
self.assertTrue(self.execd_preinstall.called)
self.configure_installation_source.assert_called_with(repo)
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(
['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
'python-six', 'unison', 'uuid'], fatal=True)
self.git_install.assert_called_with(None)
self.disable_unused_apache_sites.assert_called_with()
@patch.object(utils, 'os_release')
@patch.object(utils, 'git_install_requested')
@patch.object(unison, 'ensure_user')
def test_install_hook_git(self, ensure_user, git_requested, os_release):
git_requested.return_value = True
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://git.openstack.org/openstack/requirements', # noqa
'branch': 'stable/juno'},
{'name': 'keystone',
'repository': 'git://git.openstack.org/openstack/keystone',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
hooks.install()
self.assertTrue(self.execd_preinstall.called)
self.configure_installation_source.assert_called_with(repo)
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(
['apache2', 'haproxy', 'libffi-dev', 'libmysqlclient-dev',
'libssl-dev', 'libxml2-dev', 'libxslt1-dev', 'libyaml-dev',
'openssl', 'pwgen', 'python-dev', 'python-keystoneclient',
'python-mysqldb', 'python-pip', 'python-psycopg2',
'python-setuptools', 'python-six', 'unison', 'uuid',
'zlib1g-dev'], fatal=True)
self.git_install.assert_called_with(projects_yaml)
mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils'
@patch.object(utils, 'os_release')
@patch.object(hooks, 'config')
@patch('%s.config' % (mod_ch_openstack_utils))
@patch('%s.relation_set' % (mod_ch_openstack_utils))
@patch('%s.relation_ids' % (mod_ch_openstack_utils))
@patch('%s.get_ipv6_addr' % (mod_ch_openstack_utils))
@patch('%s.sync_db_with_multi_ipv6_addresses' % (mod_ch_openstack_utils))
def test_db_joined(self, mock_sync_db_with_multi, mock_get_ipv6_addr,
mock_relation_ids, mock_relation_set, mock_config,
mock_hooks_config, os_release):
cfg_dict = {'prefer-ipv6': False,
'database': 'keystone',
'database-user': 'keystone',
'vip': None}
class mock_cls_config():
def __call__(self, key):
return cfg_dict[key]
cfg = mock_cls_config()
mock_hooks_config.side_effect = cfg
mock_config.side_effect = cfg
self.network_get_primary_address.side_effect = NotImplementedError
self.is_relation_made.return_value = False
self.unit_get.return_value = 'keystone.foohost.com'
hooks.db_joined()
self.relation_set.assert_called_with(database='keystone',
username='keystone',
hostname='keystone.foohost.com')
self.unit_get.assert_called_with('private-address')
self.network_get_primary_address.side_effect = None
self.network_get_primary_address.return_value = '192.168.20.1'
self.is_relation_made.return_value = False
self.unit_get.return_value = 'keystone.foohost.com'
hooks.db_joined()
self.relation_set.assert_called_with(database='keystone',
username='keystone',
hostname='192.168.20.1')
self.network_get_primary_address.side_effect = NotImplementedError
cfg_dict['prefer-ipv6'] = True
mock_hooks_config.side_effect = mock_cls_config()
mock_relation_ids.return_value = ['shared-db']
mock_get_ipv6_addr.return_value = ['keystone.foohost.com']
self.is_relation_made.return_value = False
hooks.db_joined()
hosts = json.dumps(['keystone.foohost.com'])
mock_relation_set.assert_called_with(relation_id='shared-db',
database='keystone',
username='keystone',
hostname=hosts)
def test_postgresql_db_joined(self):
self.unit_get.return_value = 'keystone.foohost.com'
self.is_relation_made.return_value = False
hooks.pgsql_db_joined()
        self.relation_set.assert_called_with(database='keystone')
def test_db_joined_with_postgresql(self):
self.is_relation_made.return_value = True
with self.assertRaises(Exception) as context:
hooks.db_joined()
self.assertEqual(
context.exception.message,
'Attempting to associate a mysql database when there '
'is already associated a postgresql one')
def test_postgresql_joined_with_db(self):
self.is_relation_made.return_value = True
with self.assertRaises(Exception) as context:
hooks.pgsql_db_joined()
self.assertEqual(
context.exception.message,
'Attempting to associate a postgresql database when there '
'is already associated a mysql one')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_db_changed_missing_relation_data(self, configs,
mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.db_changed()
self.log.assert_called_with(
'shared-db relation incomplete. Peer not ready?'
)
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed_missing_relation_data(self, configs,
mock_ensure_leader,
mock_log):
mock_ensure_leader.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.pgsql_db_changed()
self.log.assert_called_with(
'pgsql-db relation incomplete. Peer not ready?'
)
def _shared_db_test(self, configs, unit_name):
self.relation_get.return_value = 'keystone/0 keystone/3'
self.local_unit.return_value = unit_name
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['shared-db']
configs.write = MagicMock()
hooks.db_changed()
def _postgresql_db_test(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['pgsql-db']
configs.write = MagicMock()
hooks.pgsql_db_changed()
@patch.object(hooks, 'leader_init_db_if_ready')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_db_changed(self, configs,
mock_ensure_ssl_cert_master,
leader_init):
mock_ensure_ssl_cert_master.return_value = False
self._shared_db_test(configs, 'keystone/3')
self.assertEquals([call('/etc/keystone/keystone.conf')],
configs.write.call_args_list)
self.assertTrue(leader_init.called)
@patch.object(hooks, 'leader_init_db_if_ready')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed(self, configs,
mock_ensure_ssl_cert_master,
leader_init):
mock_ensure_ssl_cert_master.return_value = False
self._postgresql_db_test(configs)
self.assertEquals([call('/etc/keystone/keystone.conf')],
configs.write.call_args_list)
self.assertTrue(leader_init.called)
@patch.object(hooks, 'update_all_domain_backends')
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'run_in_apache')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'git_install_requested')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.ensure_ssl_dirs')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'ensure_pki_cert_paths')
@patch.object(hooks, 'ensure_pki_dir_permissions')
@patch.object(hooks, 'ensure_ssl_dir')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'send_ssl_sync_request')
@patch.object(hooks, 'peer_units')
@patch.object(hooks, 'admin_relation_changed')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_upgrade_leader(self, configure_https,
identity_changed,
configs, get_homedir,
ensure_user,
cluster_joined,
admin_relation_changed,
mock_peer_units,
mock_send_ssl_sync_request,
mock_is_ssl_cert_master,
mock_ensure_ssl_dir,
mock_ensure_pki_cert_paths,
mock_ensure_permissions,
mock_ensure_pki_dir_permissions,
mock_ensure_ssl_dirs,
mock_ensure_ssl_cert_master,
mock_log, git_requested,
mock_is_db_initialised,
mock_run_in_apache,
update,
mock_update_domains):
mock_run_in_apache.return_value = False
git_requested.return_value = False
mock_is_ssl_cert_master.return_value = True
mock_is_db_initialised.return_value = True
self.is_db_ready.return_value = True
self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = True
# avoid having to mock syncer
mock_ensure_ssl_cert_master.return_value = False
mock_peer_units.return_value = []
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.open_port.assert_called_with(5000)
self.assertTrue(update.called)
self.assertTrue(mock_update_domains.called)
@patch.object(hooks, 'update_all_domain_backends')
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'run_in_apache')
@patch.object(hooks, 'git_install_requested')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.ensure_ssl_dirs')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'ensure_pki_cert_paths')
@patch.object(hooks, 'ensure_pki_dir_permissions')
@patch.object(hooks, 'ensure_ssl_dir')
@patch.object(hooks, 'peer_units')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_upgrade_not_leader(self, configure_https,
identity_changed,
configs, get_homedir,
ensure_user, cluster_joined,
mock_is_ssl_cert_master,
mock_peer_units,
mock_ensure_ssl_dir,
mock_ensure_permissions,
mock_ensure_pki_cert_paths,
mock_ensure_pki_permissions,
ensure_ssl_dirs,
mock_ensure_ssl_cert_master,
mock_log, git_requested,
mock_run_in_apache, update,
mock_update_domains):
mock_run_in_apache.return_value = False
git_requested.return_value = False
mock_is_ssl_cert_master.return_value = True
mock_peer_units.return_value = []
self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = False
mock_ensure_ssl_cert_master.return_value = False
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.assertFalse(self.migrate_database.called)
self.assertTrue(update.called)
self.assertTrue(mock_update_domains.called)
@patch.object(hooks, 'update_all_domain_backends')
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'run_in_apache')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'git_install_requested')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.ensure_ssl_dirs')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'ensure_pki_cert_paths')
@patch.object(hooks, 'ensure_pki_dir_permissions')
@patch.object(hooks, 'ensure_ssl_dir')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'send_ssl_sync_request')
@patch.object(hooks, 'peer_units')
@patch.object(hooks, 'admin_relation_changed')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_with_openstack_upgrade(self, configure_https,
identity_changed,
configs, get_homedir,
ensure_user, cluster_joined,
admin_relation_changed,
mock_peer_units,
mock_send_ssl_sync_request,
mock_is_ssl_cert_master,
mock_ensure_ssl_dir,
mock_ensure_permissions,
mock_ensure_pki_cert_paths,
mock_ensure_pki_permissions,
mock_ensure_ssl_dirs,
mock_ensure_ssl_cert_master,
mock_log, git_requested,
mock_is_db_initialised,
mock_run_in_apache,
update,
mock_update_domains):
mock_run_in_apache.return_value = False
git_requested.return_value = False
mock_is_ssl_cert_master.return_value = True
self.is_db_ready.return_value = True
mock_is_db_initialised.return_value = True
self.openstack_upgrade_available.return_value = True
self.is_elected_leader.return_value = True
# avoid having to mock syncer
mock_ensure_ssl_cert_master.return_value = False
mock_peer_units.return_value = []
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.assertTrue(self.do_openstack_upgrade_reexec.called)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.assertTrue(update.called)
self.assertTrue(mock_update_domains.called)
@patch.object(hooks, 'update_all_domain_backends')
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'run_in_apache')
@patch.object(hooks, 'initialise_pki')
@patch.object(hooks, 'git_install_requested')
@patch.object(hooks, 'config_value_changed')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'ensure_ssl_dir')
@patch.object(hooks, 'send_ssl_sync_request')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'peer_units')
@patch.object(hooks, 'admin_relation_changed')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_git_updated(self, configure_https,
identity_changed,
configs, get_homedir, ensure_user,
cluster_joined, admin_relation_changed,
mock_peer_units,
mock_is_db_ready,
mock_is_db_initialised,
mock_send_ssl_sync_request,
mock_ensure_ssl_dir,
mock_ensure_ssl_cert_master,
mock_log, config_val_changed,
git_requested,
mock_initialise_pki,
mock_run_in_apache,
update,
mock_update_domains):
mock_run_in_apache.return_value = False
git_requested.return_value = True
mock_ensure_ssl_cert_master.return_value = False
self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = True
mock_peer_units.return_value = []
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://git.openstack.org/openstack/requirements', # noqa
'branch': 'stable/juno'},
{'name': 'keystone',
'repository': 'git://git.openstack.org/openstack/keystone',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
hooks.config_changed()
self.git_install.assert_called_with(projects_yaml)
self.assertFalse(self.openstack_upgrade_available.called)
self.assertFalse(self.do_openstack_upgrade_reexec.called)
self.assertTrue(update.called)
self.assertTrue(mock_update_domains.called)
@patch.object(hooks, 'run_in_apache')
@patch.object(hooks, 'initialise_pki')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'git_install_requested')
@patch.object(hooks, 'config_value_changed')
@patch.object(hooks, 'ensure_ssl_dir')
@patch.object(hooks, 'configure_https')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'peer_units')
@patch.object(unison, 'get_homedir')
@patch.object(unison, 'ensure_user')
@patch('keystone_utils.ensure_ssl_cert_master')
def test_config_changed_with_openstack_upgrade_action(self,
ensure_ssl_cert,
ensure_user,
get_home,
peer_units, is_ssl,
config_https,
ensure_ssl_dir,
config_value_changed,
git_requested,
mock_db_init,
mock_initialise_pki,
mock_run_in_apache):
mock_run_in_apache.return_value = False
ensure_ssl_cert.return_value = False
peer_units.return_value = []
git_requested.return_value = False
self.openstack_upgrade_available.return_value = True
self.test_config.set('action-managed-upgrade', True)
hooks.config_changed()
self.assertFalse(self.do_openstack_upgrade_reexec.called)
@patch.object(hooks, 'is_db_initialised')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib, mock_ensure_ssl_cert_master,
mock_log, mock_is_db_initialised):
self.expect_ha.return_value = False
mock_is_db_initialised.return_value = True
self.is_db_ready.return_value = True
self.is_service_present.return_value = True
mock_ensure_ssl_cert_master.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.add_service_to_keystone.assert_called_with(
'identity-service:0',
'unit/0')
self.delete_service_entry.assert_called_with(
'quantum',
'network')
@patch.object(hooks, 'is_db_initialised')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader_no_neutron(self, mock_send_notifications,
mock_hashlib,
mock_ensure_ssl_cert_master,
mock_log,
mock_is_db_initialised):
self.expect_ha.return_value = False
mock_is_db_initialised.return_value = True
self.is_db_ready.return_value = True
self.is_service_present.return_value = False
mock_ensure_ssl_cert_master.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.assertFalse(self.delete_service_entry.called)
@patch.object(hooks, 'local_unit')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
def test_identity_changed_no_leader(self, mock_ensure_ssl_cert_master,
mock_log, mock_local_unit):
mock_ensure_ssl_cert_master.return_value = False
mock_local_unit.return_value = 'unit/0'
self.is_elected_leader.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.assertFalse(self.add_service_to_keystone.called)
self.log.assert_called_with(
'Deferring identity_changed() to service leader.')
@patch.object(hooks, 'local_unit')
@patch.object(hooks, 'peer_units')
@patch.object(unison, 'ssh_authorized_peers')
def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units,
mock_local_unit):
mock_local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/0']
hooks.cluster_joined()
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
@patch.object(hooks, 'initialise_pki')
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'get_ssl_sync_request_units')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'peer_units')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.config')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'check_peer_actions')
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers,
check_peer_actions, mock_synchronize_ca,
mock_ensure_ssl_cert_master,
mock_log, mock_config, mock_relation_ids,
mock_peer_units,
mock_is_ssl_cert_master,
mock_get_ssl_sync_request_units,
mock_update_all_identity_relation_units,
mock_initialise_pki):
relation_settings = {'foo_passwd': '123',
'identity-service:16_foo': 'bar'}
mock_is_ssl_cert_master.return_value = False
mock_peer_units.return_value = ['unit/0']
mock_ensure_ssl_cert_master.return_value = False
mock_relation_ids.return_value = []
self.is_elected_leader.return_value = False
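        # Stub relation_get(): return the full settings dict when no attribute
        # is requested, otherwise just the matching key, mirroring how
        # charmhelpers' relation_get behaves.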
def fake_rel_get(attribute=None, *args, **kwargs):
if not attribute:
return relation_settings
return relation_settings.get(attribute)
self.relation_get.side_effect = fake_rel_get
mock_config.return_value = None
hooks.cluster_changed()
whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master',
'db-initialised', 'ssl-cert-available-updates']
self.peer_echo.assert_called_with(force=True, includes=whitelist)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertFalse(mock_synchronize_ca.called)
self.assertTrue(configs.write_all.called)
@patch.object(hooks.CONFIGS, 'write')
def test_leader_elected(self, mock_write):
hooks.leader_elected()
mock_write.assert_has_calls([call(utils.TOKEN_FLUSH_CRON_FILE)])
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks.CONFIGS, 'write')
def test_leader_settings_changed(self, mock_write, update):
self.relation_ids.return_value = ['identity:1']
self.related_units.return_value = ['keystone/1']
hooks.leader_settings_changed()
mock_write.assert_has_calls([call(utils.TOKEN_FLUSH_CRON_FILE)])
self.assertTrue(update.called)
def test_ha_joined(self):
self.get_hacluster_config.return_value = {
'vip': '10.10.10.10',
'ha-bindiface': 'em0',
'ha-mcastport': '8080'
}
self.get_iface_for_address.return_value = 'em1'
self.get_netmask_for_address.return_value = '255.255.255.0'
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_ks_em1_vip': 'params ip="10.10.10.10"'
' cidr_netmask="255.255.255.0" nic="em1"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.relation_set.assert_called_with(**args)
def test_ha_joined_duplicate_vip_key(self):
self.get_hacluster_config.return_value = {
'vip': '10.10.10.10 10.10.10.11',
'ha-bindiface': 'em0',
'ha-mcastport': '8080'
}
self.get_iface_for_address.return_value = 'em1'
self.get_netmask_for_address.return_value = '255.255.255.0'
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_ks_em1_vip': 'params ip="10.10.10.10"'
' cidr_netmask="255.255.255.0" nic="em1"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.relation_set.assert_called_with(**args)
def test_ha_joined_no_bound_ip(self):
self.get_hacluster_config.return_value = {
'vip': '10.10.10.10',
'ha-bindiface': 'em0',
'ha-mcastport': '8080'
}
self.test_config.set('vip_iface', 'eth120')
self.test_config.set('vip_cidr', '21')
self.get_iface_for_address.return_value = None
self.get_netmask_for_address.return_value = None
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_ks_eth120_vip': 'ocf:heartbeat:IPaddr2',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_ks_eth120_vip': 'params ip="10.10.10.10"'
' cidr_netmask="21" nic="eth120"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.relation_set.assert_called_with(**args)
def test_ha_joined_with_ipv6(self):
self.test_config.set('prefer-ipv6', True)
self.get_hacluster_config.return_value = {
'vip': '2001:db8:1::1',
'ha-bindiface': 'em0',
'ha-mcastport': '8080'
}
self.get_iface_for_address.return_value = 'em1'
self.get_netmask_for_address.return_value = '64'
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPv6addr',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_ks_em1_vip': 'params ipv6addr="2001:db8:1::1"'
' cidr_netmask="64" nic="em1"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.relation_set.assert_called_with(**args)
def test_ha_joined_dns_ha(self):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_keystone_public_hostname': 'ocf:maas:dns'})
resource_params.update({'res_keystone_public_hostname':
'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
self.get_hacluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'keystone.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_keystone_public_hostname': 'ocf:maas:dns',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_keystone_public_hostname': 'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.update_dns_ha_resource_params.side_effect = _fake_update
hooks.ha_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_not_clustered_not_leader(self, configs,
mock_synchronize_ca,
mock_is_master,
mock_log):
mock_is_master.return_value = False
self.relation_get.return_value = False
self.is_elected_leader.return_value = False
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.assertFalse(mock_synchronize_ca.called)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'is_db_initialised')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_clustered_leader(self, configs,
identity_changed,
mock_ensure_ssl_cert_master,
mock_log,
mock_is_db_initialised,
update):
mock_is_db_initialised.return_value = True
self.is_db_ready.return_value = True
mock_ensure_ssl_cert_master.return_value = False
self.relation_get.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.assertTrue(update.called)
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_enable(self, configs, mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['https']
configs.write = MagicMock()
hooks.configure_https()
self.assertTrue(configs.write_all.called)
cmd = ['a2ensite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_disable(self, configs,
mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['']
configs.write = MagicMock()
hooks.configure_https()
self.assertTrue(configs.write_all.called)
cmd = ['a2dissite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(utils, 'os_release')
@patch.object(utils, 'git_install_requested')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'is_db_initialised')
@patch('keystone_utils.log')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_leader(self, ssh_authorized_peers,
mock_synchronize_ca,
mock_update_hash_from_path,
mock_ensure_ssl_cert_master,
mock_is_elected_leader,
mock_relation_ids,
mock_log,
mock_is_db_initialised,
mock_is_db_ready,
git_requested,
os_release,
update):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
mock_is_elected_leader.return_value = False
mock_relation_ids.return_value = []
mock_ensure_ssl_cert_master.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
self.is_elected_leader.return_value = True
self.filter_installed_packages.return_value = []
git_requested.return_value = False
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(mock_synchronize_ca.called)
self.assertTrue(update.called)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'is_db_initialised')
def test_leader_init_db_if_ready(self, is_db_initialized,
update):
""" Verify leader initilaizes db """
self.is_elected_leader.return_value = True
is_db_initialized.return_value = False
self.is_db_ready.return_value = True
hooks.leader_init_db_if_ready()
self.is_db_ready.assert_called_with(use_current_context=False)
self.migrate_database.assert_called_with()
update.assert_called_with(check_db_ready=False)
@patch.object(hooks, 'update_all_identity_relation_units')
def test_leader_init_db_not_leader(self, update):
""" Verify non-leader does not initilaize db """
self.is_elected_leader.return_value = False
hooks.leader_init_db_if_ready()
self.is_elected_leader.assert_called_with('grp_ks_vips')
self.log.assert_called_with("Not leader - skipping db init",
level='DEBUG')
self.assertFalse(self.migrate_database.called)
self.assertFalse(update.called)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'is_db_initialised')
    def test_leader_init_db_not_initialized(self, is_db_initialized, update):
        """ Verify leader does not initialize db when already initialized """
self.is_elected_leader.return_value = True
is_db_initialized.return_value = True
hooks.leader_init_db_if_ready()
self.log.assert_called_with('Database already initialised - skipping '
'db init', level='DEBUG')
self.assertFalse(self.migrate_database.called)
self.assertTrue(update.called)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(hooks, 'is_db_initialised')
def test_leader_init_db_not_ready(self, is_db_initialized, update):
""" Verify leader does not initilaize db when db not ready """
self.is_elected_leader.return_value = True
is_db_initialized.return_value = False
self.is_db_ready.return_value = False
hooks.leader_init_db_if_ready()
self.is_db_ready.assert_called_with(use_current_context=False)
self.log.assert_called_with('Allowed_units list provided and this '
'unit not present', level='INFO')
self.assertFalse(self.migrate_database.called)
self.assertFalse(update.called)
@patch.object(hooks, 'admin_relation_changed')
@patch.object(hooks, 'identity_credentials_changed')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'CONFIGS')
def test_update_all_identity_relation_units(self, configs,
is_db_initialized,
identity_changed,
identity_credentials_changed,
admin_relation_changed):
""" Verify all identity relations are updated """
is_db_initialized.return_value = True
self.relation_ids.return_value = ['identity-relation:0']
self.related_units.return_value = ['unit/0']
log_calls = [call('Firing identity_changed hook for all related '
'services.'),
call('Firing admin_relation_changed hook for all related '
'services.'),
call('Firing identity_credentials_changed hook for all '
'related services.')]
hooks.update_all_identity_relation_units(check_db_ready=False)
self.assertTrue(configs.write_all.called)
identity_changed.assert_called_with(
relation_id='identity-relation:0',
remote_unit='unit/0')
identity_credentials_changed.assert_called_with(
relation_id='identity-relation:0',
remote_unit='unit/0')
admin_relation_changed.assert_called_with('identity-relation:0')
self.log.assert_has_calls(log_calls, any_order=True)
@patch.object(hooks, 'CONFIGS')
def test_update_all_db_not_ready(self, configs):
""" Verify update identity relations when DB is not ready """
self.is_db_ready.return_value = False
hooks.update_all_identity_relation_units(check_db_ready=True)
self.assertTrue(configs.write_all.called)
self.assertTrue(self.is_db_ready.called)
self.log.assert_called_with('Allowed_units list provided and this '
'unit not present', level='INFO')
self.assertFalse(self.relation_ids.called)
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'CONFIGS')
    def test_update_all_db_not_initialized(self, configs, is_db_initialized):
""" Verify update identity relations when DB is not initialized """
is_db_initialized.return_value = False
hooks.update_all_identity_relation_units(check_db_ready=False)
self.assertTrue(configs.write_all.called)
self.assertFalse(self.is_db_ready.called)
self.log.assert_called_with('Database not yet initialised - '
'deferring identity-relation updates',
level='INFO')
self.assertFalse(self.relation_ids.called)
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'CONFIGS')
def test_update_all_leader(self, configs, is_db_initialized):
""" Verify update identity relations when the leader"""
self.is_elected_leader.return_value = True
is_db_initialized.return_value = True
hooks.update_all_identity_relation_units(check_db_ready=False)
self.assertTrue(configs.write_all.called)
self.assertTrue(self.ensure_initial_admin.called)
# Still updates relations
self.assertTrue(self.relation_ids.called)
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'CONFIGS')
def test_update_all_not_leader(self, configs, is_db_initialized):
""" Verify update identity relations when not the leader"""
self.is_elected_leader.return_value = False
is_db_initialized.return_value = True
hooks.update_all_identity_relation_units(check_db_ready=False)
self.assertTrue(configs.write_all.called)
self.assertFalse(self.ensure_initial_admin.called)
# Still updates relations
self.assertTrue(self.relation_ids.called)
@patch.object(hooks, 'update_all_identity_relation_units')
@patch.object(utils, 'os_release')
@patch.object(utils, 'git_install_requested')
@patch('keystone_utils.log')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.update_hash_from_path')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_not_leader(self, ssh_authorized_peers,
mock_update_hash_from_path,
mock_ensure_ssl_cert_master,
mock_relation_ids,
mock_log, git_requested,
os_release, update):
mock_relation_ids.return_value = []
mock_ensure_ssl_cert_master.return_value = False
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
self.is_elected_leader.return_value = False
self.filter_installed_packages.return_value = []
git_requested.return_value = False
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.log.called)
self.assertFalse(update.called)
def test_domain_backend_changed_v2(self):
self.get_api_version.return_value = 2
hooks.domain_backend_changed()
self.assertTrue(self.get_api_version.called)
self.assertFalse(self.relation_get.called)
def test_domain_backend_changed_incomplete(self):
self.get_api_version.return_value = 3
self.relation_get.return_value = None
hooks.domain_backend_changed()
self.assertTrue(self.get_api_version.called)
self.relation_get.assert_called_with(
attribute='domain-name',
unit=None,
rid=None
)
self.assertFalse(self.is_leader.called)
@patch.object(hooks, 'is_unit_paused_set')
@patch.object(hooks, 'is_db_initialised')
def test_domain_backend_changed_complete(self,
is_db_initialised,
is_unit_paused_set):
self.get_api_version.return_value = 3
self.relation_get.side_effect = ['mydomain', 'nonce2']
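        # A list side_effect makes successive relation_get() calls return
        # 'mydomain' then 'nonce2' (the domain-name, then the restart-nonce).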
self.is_leader.return_value = True
self.is_db_ready.return_value = True
is_db_initialised.return_value = True
mock_kv = MagicMock()
mock_kv.get.return_value = None
self.unitdata.kv.return_value = mock_kv
is_unit_paused_set.return_value = False
self.keystone_service.return_value = 'apache2'
hooks.domain_backend_changed()
self.assertTrue(self.get_api_version.called)
self.relation_get.assert_has_calls([
call(attribute='domain-name',
unit=None,
rid=None),
call(attribute='restart-nonce',
unit=None,
rid=None),
])
self.create_or_show_domain.assert_called_with('mydomain')
self.service_restart.assert_called_with('apache2')
mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
'nonce2')
self.assertTrue(mock_kv.flush.called)
@patch.object(hooks, 'is_unit_paused_set')
@patch.object(hooks, 'is_db_initialised')
def test_domain_backend_changed_complete_follower(self,
is_db_initialised,
is_unit_paused_set):
self.get_api_version.return_value = 3
self.relation_get.side_effect = ['mydomain', 'nonce2']
self.is_leader.return_value = False
self.is_db_ready.return_value = True
is_db_initialised.return_value = True
mock_kv = MagicMock()
mock_kv.get.return_value = None
self.unitdata.kv.return_value = mock_kv
is_unit_paused_set.return_value = False
self.keystone_service.return_value = 'apache2'
hooks.domain_backend_changed()
self.assertTrue(self.get_api_version.called)
self.relation_get.assert_has_calls([
call(attribute='domain-name',
unit=None,
rid=None),
call(attribute='restart-nonce',
unit=None,
rid=None),
])
# Only lead unit will create the domain
self.assertFalse(self.create_or_show_domain.called)
self.service_restart.assert_called_with('apache2')
mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
'nonce2')
self.assertTrue(mock_kv.flush.called)
|
|
#!/usr/bin/env python3
import argparse
import glob
import os
import shutil
import sys
from typing import List
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
import pygments
from pytz import VERSION as timezones_version
from scripts.lib import clean_unused_caches
from scripts.lib.zulip_tools import (
ENDC,
OKBLUE,
get_dev_uuid_var_path,
is_digest_obsolete,
run,
write_new_digest,
)
from tools.setup.generate_zulip_bots_static_files import generate_zulip_bots_static_files
from version import PROVISION_VERSION
pygments_version = pygments.__version__ # type: ignore[attr-defined] # private member missing from stubs
VENV_PATH = "/srv/zulip-py3-venv"
UUID_VAR_PATH = get_dev_uuid_var_path()
def create_var_directories() -> None:
# create var/coverage, var/log, etc.
var_dir = os.path.join(ZULIP_PATH, "var")
sub_dirs = [
"coverage",
"log",
"node-coverage",
"test_uploads",
"uploads",
"xunit-test-results",
]
for sub_dir in sub_dirs:
path = os.path.join(var_dir, sub_dir)
os.makedirs(path, exist_ok=True)
def build_pygments_data_paths() -> List[str]:
paths = [
"tools/setup/build_pygments_data",
"tools/setup/lang.json",
]
return paths
def build_timezones_data_paths() -> List[str]:
paths = [
"tools/setup/build_timezone_values",
]
return paths
def compilemessages_paths() -> List[str]:
paths = ["zerver/management/commands/compilemessages.py"]
paths += glob.glob("locale/*/LC_MESSAGES/*.po")
paths += glob.glob("locale/*/translations.json")
return paths
def inline_email_css_paths() -> List[str]:
paths = [
"scripts/setup/inline_email_css.py",
"templates/zerver/emails/email.css",
]
paths += glob.glob("templates/zerver/emails/*.source.html")
return paths
def configure_rabbitmq_paths() -> List[str]:
paths = [
"scripts/setup/configure-rabbitmq",
]
return paths
def setup_shell_profile(shell_profile: str) -> None:
shell_profile_path = os.path.expanduser(shell_profile)
def write_command(command: str) -> None:
if os.path.exists(shell_profile_path):
with open(shell_profile_path) as shell_profile_file:
lines = [line.strip() for line in shell_profile_file.readlines()]
if command not in lines:
with open(shell_profile_path, "a+") as shell_profile_file:
shell_profile_file.writelines(command + "\n")
else:
with open(shell_profile_path, "w") as shell_profile_file:
shell_profile_file.writelines(command + "\n")
source_activate_command = "source " + os.path.join(VENV_PATH, "bin", "activate")
write_command(source_activate_command)
if os.path.exists("/srv/zulip"):
write_command("cd /srv/zulip")
def setup_bash_profile() -> None:
"""Select a bash profile file to add setup code to."""
BASH_PROFILES = [
os.path.expanduser(p) for p in ("~/.bash_profile", "~/.bash_login", "~/.profile")
]
def clear_old_profile() -> None:
# An earlier version of this script would output a fresh .bash_profile
# even though a .profile existed in the image used. As a convenience to
# existing developers (and, perhaps, future developers git-bisecting the
# provisioning scripts), check for this situation, and blow away the
# created .bash_profile if one is found.
BASH_PROFILE = BASH_PROFILES[0]
DOT_PROFILE = BASH_PROFILES[2]
OLD_PROFILE_TEXT = "source /srv/zulip-py3-venv/bin/activate\ncd /srv/zulip\n"
if os.path.exists(DOT_PROFILE):
try:
with open(BASH_PROFILE) as f:
profile_contents = f.read()
if profile_contents == OLD_PROFILE_TEXT:
os.unlink(BASH_PROFILE)
except FileNotFoundError:
pass
clear_old_profile()
for candidate_profile in BASH_PROFILES:
if os.path.exists(candidate_profile):
setup_shell_profile(candidate_profile)
break
else:
# no existing bash profile found; claim .bash_profile
setup_shell_profile(BASH_PROFILES[0])
def need_to_run_build_pygments_data() -> bool:
if not os.path.exists("static/generated/pygments_data.json"):
return True
return is_digest_obsolete(
"build_pygments_data_hash",
build_pygments_data_paths(),
[pygments_version],
)
def need_to_run_build_timezone_data() -> bool:
if not os.path.exists("static/generated/timezones.json"):
return True
return is_digest_obsolete(
"build_timezones_data_hash",
build_timezones_data_paths(),
[timezones_version],
)
def need_to_run_compilemessages() -> bool:
if not os.path.exists("locale/language_name_map.json"):
# User may have cleaned their git checkout.
print("Need to run compilemessages due to missing language_name_map.json")
return True
return is_digest_obsolete(
"last_compilemessages_hash",
compilemessages_paths(),
)
def need_to_run_inline_email_css() -> bool:
if not os.path.exists("templates/zerver/emails/compiled/"):
return True
return is_digest_obsolete(
"last_email_source_files_hash",
inline_email_css_paths(),
)
def need_to_run_configure_rabbitmq(settings_list: List[str]) -> bool:
obsolete = is_digest_obsolete(
"last_configure_rabbitmq_hash",
configure_rabbitmq_paths(),
settings_list,
)
if obsolete:
return True
try:
from zerver.lib.queue import SimpleQueueClient
SimpleQueueClient()
return False
except Exception:
return True
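# Every provisioning step below follows the same cache pattern: hash the files
# listed by its *_paths() helper (plus any extra version strings), skip the step
# when is_digest_obsolete() reports no change, and record a fresh digest with
# write_new_digest() after re-running it. A minimal sketch of that pattern,
# using a hypothetical step called "example" (not part of this script):
#
#     if options.is_force or is_digest_obsolete("example_hash", example_paths()):
#         run(["tools/setup/example"])
#         write_new_digest("example_hash", example_paths())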
def main(options: argparse.Namespace) -> int:
setup_bash_profile()
setup_shell_profile("~/.zprofile")
# This needs to happen before anything that imports zproject.settings.
run(["scripts/setup/generate_secrets.py", "--development"])
create_var_directories()
# The `build_emoji` script requires `emoji-datasource` package
# which we install via npm; thus this step is after installing npm
# packages.
run(["tools/setup/emoji/build_emoji"])
# copy over static files from the zulip_bots package
generate_zulip_bots_static_files()
if options.is_force or need_to_run_build_pygments_data():
run(["tools/setup/build_pygments_data"])
write_new_digest(
"build_pygments_data_hash",
build_pygments_data_paths(),
[pygments_version],
)
else:
print("No need to run `tools/setup/build_pygments_data`.")
if options.is_force or need_to_run_build_timezone_data():
run(["tools/setup/build_timezone_values"])
write_new_digest(
"build_timezones_data_hash",
build_timezones_data_paths(),
[timezones_version],
)
else:
print("No need to run `tools/setup/build_timezone_values`.")
if options.is_force or need_to_run_inline_email_css():
run(["scripts/setup/inline_email_css.py"])
write_new_digest(
"last_email_source_files_hash",
inline_email_css_paths(),
)
else:
print("No need to run `scripts/setup/inline_email_css.py`.")
if not options.is_build_release_tarball_only:
# The following block is skipped when we just need the development
# environment to build a release tarball.
# Need to set up Django before using template_status
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup()
from django.conf import settings
from zerver.lib.test_fixtures import (
DEV_DATABASE,
TEST_DATABASE,
destroy_leaked_test_databases,
)
if options.is_force or need_to_run_configure_rabbitmq([settings.RABBITMQ_PASSWORD]):
run(["scripts/setup/configure-rabbitmq"])
write_new_digest(
"last_configure_rabbitmq_hash",
configure_rabbitmq_paths(),
[settings.RABBITMQ_PASSWORD],
)
else:
print("No need to run `scripts/setup/configure-rabbitmq.")
dev_template_db_status = DEV_DATABASE.template_status()
if options.is_force or dev_template_db_status == "needs_rebuild":
run(["tools/setup/postgresql-init-dev-db"])
if options.skip_dev_db_build:
# We don't need to build the manual development
# database on continuous integration for running tests, so we can
# just leave it as a template db and save a minute.
#
# Important: We don't write a digest as that would
# incorrectly claim that we ran migrations.
pass
else:
run(["tools/rebuild-dev-database"])
DEV_DATABASE.write_new_db_digest()
elif dev_template_db_status == "run_migrations":
DEV_DATABASE.run_db_migrations()
elif dev_template_db_status == "current":
print("No need to regenerate the dev DB.")
test_template_db_status = TEST_DATABASE.template_status()
if options.is_force or test_template_db_status == "needs_rebuild":
run(["tools/setup/postgresql-init-test-db"])
run(["tools/rebuild-test-database"])
TEST_DATABASE.write_new_db_digest()
elif test_template_db_status == "run_migrations":
TEST_DATABASE.run_db_migrations()
elif test_template_db_status == "current":
print("No need to regenerate the test DB.")
if options.is_force or need_to_run_compilemessages():
run(["./manage.py", "compilemessages"])
write_new_digest(
"last_compilemessages_hash",
compilemessages_paths(),
)
else:
print("No need to run `manage.py compilemessages`.")
destroyed = destroy_leaked_test_databases()
if destroyed:
print(f"Dropped {destroyed} stale test databases!")
clean_unused_caches.main(
argparse.Namespace(
threshold_days=6,
# The defaults here should match parse_cache_script_args in zulip_tools.py
dry_run=False,
verbose=False,
no_headings=True,
)
)
# Keeping this cache file around can cause eslint to throw
# random TypeErrors when new/updated dependencies are added
if os.path.isfile(".eslintcache"):
# Remove this block when
# https://github.com/eslint/eslint/issues/11639 is fixed
# upstream.
os.remove(".eslintcache")
# Clean up the root of the `var/` directory for various
# testing-related files that we have migrated to
# `var/<uuid>/test-backend`.
print("Cleaning var/ directory files...")
var_paths = glob.glob("var/test*")
var_paths.append("var/bot_avatar")
for path in var_paths:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except FileNotFoundError:
pass
version_file = os.path.join(UUID_VAR_PATH, "provision_version")
print(f"writing to {version_file}\n")
with open(version_file, "w") as f:
f.write(PROVISION_VERSION + "\n")
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--force",
action="store_true",
dest="is_force",
help="Ignore all provisioning optimizations.",
)
parser.add_argument(
"--build-release-tarball-only",
action="store_true",
dest="is_build_release_tarball_only",
help="Provision for test suite with production settings.",
)
parser.add_argument(
"--skip-dev-db-build", action="store_true", help="Don't run migrations on dev database."
)
options = parser.parse_args()
sys.exit(main(options))
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
class SamplingOpsTest(tf.test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [tf.zeros([1, 3]), tf.ones([1, 5])]
label = tf.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
initial_p = [.1, .3, .1, .3, .2] # only used for stratified_sample
batch_size = 16
# Curry the rejection sampler so we can easily run the same tests on both
# stratified_sample and stratified_sample_unknown_dist.
def curried_sampler(tensors, labels, probs, batch_size, enqueue_many=True):
return tf.contrib.framework.sampling_ops.stratified_sample(
tensors=tensors,
labels=labels,
target_probs=probs,
batch_size=batch_size,
init_probs=initial_p,
enqueue_many=enqueue_many)
samplers = [
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist,
curried_sampler,
]
for sampler in samplers:
      logging.info('Now testing `%s`', sampler.__name__)
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampler(val, tf.zeros([]), probs, batch_size, enqueue_many=True)
with self.assertRaises(ValueError):
sampler(val, tf.zeros([1, 1]), probs, batch_size, enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampler(val, tf.constant([0, 1, 0, 0, 0]), probs, batch_size)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampler(tf.zeros([1, 3]), label, probs, batch_size)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampler(val, tf.constant(1), probs, batch_size, enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampler([tf.zeros([2, 1])], label, probs, batch_size, enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampler(val, label, 1, batch_size)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampler(
val, label, tf.placeholder(
tf.float32, shape=[None]), batch_size)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
# Probabilities must be 1D.
with self.assertRaises(ValueError):
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val, label, np.array([[.25, .25], [.25, .25]]), batch_size)
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [tf.zeros([3, 1])]
illegal_labels = [
        [0, -1, 1],  # label values must be nonnegative
        [5, 1, 1],  # label values must be less than the number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = tf.placeholder(tf.int32, shape=[None])
probs_ph = tf.placeholder(tf.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = tf.contrib.framework.sampling_ops._verify_input(
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def batchingBehaviorHelper(self, sampler):
batch_size = 20
input_batch_size = 11
val_input_batch = [tf.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = tf.cond(
tf.greater(.5, tf.random_uniform([])),
lambda: tf.ones([input_batch_size], dtype=tf.int32) * 1,
lambda: tf.ones([input_batch_size], dtype=tf.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
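    # Only classes 1 and 3 get a nonzero target probability, matching the two
    # label values the cond above can emit.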
data_batch, labels = sampler(
val_input_batch, lbl_input_batch, probs, batch_size, enqueue_many=True)
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [tf.zeros([2, 3, 4])]
lbl_input_batch = tf.ones([], dtype=tf.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = tf.contrib.framework.sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += tf.contrib.framework.sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val_input_batch, lbl_input_batch, probs, batch_size)
batches += tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist(
val_input_batch, lbl_input_batch, probs, batch_size)
summary_op = tf.merge_summary(tf.get_collection(tf.GraphKeys.SUMMARIES))
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testBatchingBehavior(self):
self.batchingBehaviorHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionBatchingBehavior(self):
initial_p = [0, .3, 0, .7, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=True):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.batchingBehaviorHelper(curried_sampler)
def testProbabilitiesCanBeChanged(self):
# Set up graph.
tf.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = tf.cond(
tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
lambda: tf.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = tf.placeholder(tf.float32, shape=[5])
batch_size = 2
data_batch, labels = tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist( # pylint: disable=line-too-long
val, label, probs, batch_size)
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for _ in range(5):
[data], lbls = sess.run([data_batch, labels],
feed_dict={probs: [1, 0, 0, 0, 0]})
for data_example in data:
self.assertListEqual([0, 0], list(data_example))
self.assertListEqual([0, 0], list(lbls))
# Now change distribution and expect different output.
for _ in range(5):
[data], lbls = sess.run([data_batch, labels],
feed_dict={probs: [0, 0, 0, 1, 0]})
for data_example in data:
self.assertListEqual([3, 12], list(data_example))
self.assertListEqual([3, 3], list(lbls))
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = tf.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = tf.placeholder(tf.float32) # completely undefined shape
labels_ph = tf.placeholder(tf.int32) # completely undefined shape
val_tf, labels_tf, _ = tf.contrib.framework.sampling_ops._verify_input(
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.test_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def dataListHelper(self, sampler):
batch_size = 20
val_input_batch = [tf.zeros([2, 3, 4]), tf.ones([2, 4]), tf.ones(2) * 3]
lbl_input_batch = tf.ones([], dtype=tf.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampler(val_input_batch, lbl_input_batch, probs,
batch_size)
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, tf.Tensor))
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def testDataListInput(self):
self.dataListHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionDataListInput(self):
initial_p = [0, 1, 0, 0, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.dataListHelper(curried_sampler)
def normalBehaviorHelper(self, sampler):
# Set up graph.
tf.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = tf.cond(
tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
lambda: tf.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.test_session() as sess:
# Need to initialize variables that keep running total of classes seen.
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
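    # e.g. with probs = [.8, 0, 0, .2, 0], lbl1 = 0 and lbl2 = 3 this works out
    # to 0.8 * 0 + 0.2 * 3 = 0.6.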
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testNormalBehavior(self):
self.normalBehaviorHelper(
tf.contrib.framework.sampling_ops.stratified_sample_unknown_dist)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return tf.contrib.framework.sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testConditionallyEnqueueAndBatch(self):
tf.set_random_seed(1234)
tensor = tf.cond(
tf.greater(.5, tf.random_uniform([])),
lambda: tf.constant(1.0),
lambda: tf.constant(2.0))
accept_prob = tensor - 1
batch_size = 4
# Set up the test graph.
[batch] = tf.contrib.framework.sampling_ops._conditional_batch(
[tensor], accept_prob, batch_size)
# Check conditional operation.
with self.test_session():
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
batch_np = batch.eval()
coord.request_stop()
coord.join(threads)
# Check that all elements in batch come from tensors with acceptance prob
# 1, so that none come from acceptance prob 0.
self.assertListEqual(list(batch_np), [2.0] * batch_size)
def testConditionallyEnqueueAndBatchTypes(self):
tensor = tf.constant(1.0)
accept_prob = tensor - 1
batch_size = 4
# Check that output types are the same for 1 and 2-length input lists.
output1 = tf.contrib.framework.sampling_ops._conditional_batch(
[tensor], accept_prob, batch_size)
output2 = tf.contrib.framework.sampling_ops._conditional_batch(
[tensor, tensor], accept_prob, batch_size)
self.assertEqual(type(output1), type(output2))
if __name__ == '__main__':
tf.test.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
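# `cls` is the optional per-call response hook used throughout this operation
# group: when supplied, it is invoked as cls(pipeline_response, deserialized,
# response_headers) and its return value is what the operation returns instead
# of the default result.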
class VirtualNetworksOperations:
"""VirtualNetworksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
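# Usage sketch (not part of the generated code; client construction and names are
# illustrative): this operations class is normally reached through the aio
# management client, e.g.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       poller = await client.virtual_networks.begin_delete("my-rg", "my-vnet")
#       await poller.result()  # completes with None once the LRO finishes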
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualNetwork":
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
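# Usage sketch (illustrative resource names): a single virtual network is fetched
# directly and deserialized into a VirtualNetwork model, e.g.
#
#   vnet = await client.virtual_networks.get("my-rg", "my-vnet")
#   print(vnet.id, vnet.address_space.address_prefixes)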
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs: Any
) -> "_models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetwork"]:
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
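# Usage sketch (illustrative, not generated code): parameters may be passed as a
# VirtualNetwork model or, in recent SDK releases, as an equivalent dict, e.g.
#
#   vnet_params = {
#       "location": "westus",
#       "address_space": {"address_prefixes": ["10.0.0.0/16"]},
#   }
#   poller = await client.virtual_networks.begin_create_or_update("my-rg", "my-vnet", vnet_params)
#   vnet = await poller.result()  # deserialized VirtualNetwork on success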
async def _update_tags_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetwork"]:
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to update virtual network tags.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
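# Usage sketch (illustrative): the returned AsyncItemPaged yields VirtualNetwork
# items across pages, so a caller simply iterates asynchronously, e.g.
#
#   async for vnet in client.virtual_networks.list_all():
#       print(vnet.name, vnet.location)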
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
async def check_ip_address_availability(
self,
resource_group_name: str,
virtual_network_name: str,
ip_address: Optional[str] = None,
**kwargs: Any
) -> "_models.IPAddressAvailabilityResult":
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if ip_address is not None:
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
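# Usage sketch (illustrative): the result exposes 'available' and, when the
# address is already taken, a list of alternative addresses, e.g.
#
#   result = await client.virtual_networks.check_ip_address_availability(
#       "my-rg", "my-vnet", ip_address="10.0.0.4")
#   print(result.available, result.available_ip_addresses)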
def list_usage(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListUsageResult"]:
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
|
|
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import os.path
import json
import yaml
import filelock
from flask import jsonify, request
import minemeld.loader
from . import config
from .utils import running_config, committed_config
from .aaa import MMBlueprint
from .logger import LOG
__all__ = ['BLUEPRINT']
PROTOTYPE_ENV = 'MINEMELD_PROTOTYPE_PATH'
LOCAL_PROTOTYPE_PATH = 'MINEMELD_LOCAL_PROTOTYPE_PATH'
BLUEPRINT = MMBlueprint('prototype', __name__, url_prefix='')
PROTOTYPE_PATHS = None
def _prototype_paths():
global PROTOTYPE_PATHS
if PROTOTYPE_PATHS is not None:
return PROTOTYPE_PATHS
paths = config.get(PROTOTYPE_ENV, None)
if paths is None:
raise RuntimeError('{} environment variable not set'.format(PROTOTYPE_ENV))
paths = paths.split(':')
prototype_eps = minemeld.loader.map(minemeld.loader.MM_PROTOTYPES_ENTRYPOINT)
for pname, mmep in prototype_eps.iteritems():
if not mmep.loadable:
LOG.info('Prototype entry point {} not loadable, ignored'.format(pname))
continue
try:
# even if old dist is no longer available, old module could be cached
cmodule = sys.modules.get(mmep.ep.module_name, None)
cmodule_path = getattr(cmodule, '__path__', None)
if cmodule is not None and cmodule_path is not None:
if not cmodule_path[0].startswith(mmep.ep.dist.location):
LOG.info('Invalidating cache for {}'.format(mmep.ep.module_name))
sys.modules.pop(mmep.ep.module_name)
ep = mmep.ep.load()
# we add prototype paths in front, to let extensions override default protos
paths.insert(0, ep())
except:
LOG.exception('Exception loading paths from {}'.format(pname))
PROTOTYPE_PATHS = paths
return paths
def _local_library_path(prototypename):
toks = prototypename.split('.', 1)
if len(toks) != 2:
raise ValueError('bad prototype name')
library, prototype = toks
if os.path.basename(library) != library:
raise ValueError('bad library name, nice try')
if library != 'minemeldlocal':
raise ValueError('invalid library')
library_filename = library+'.yml'
local_path = config.get(LOCAL_PROTOTYPE_PATH)
if local_path is None:
paths = os.getenv(PROTOTYPE_ENV, None)
if paths is None:
raise RuntimeError(
'%s environment variable not set' %
(PROTOTYPE_ENV)
)
paths = paths.split(':')
for p in paths:
if '/local/' in p:
local_path = p
break
if local_path is None:
raise RuntimeError(
'No local path in %s' % PROTOTYPE_ENV
)
library_path = os.path.join(local_path, library_filename)
return library_path, prototype
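# Illustrative example (hypothetical paths): with MINEMELD_LOCAL_PROTOTYPE_PATH set
# to /opt/minemeld/local/prototypes, _local_library_path('minemeldlocal.myProto')
# returns ('/opt/minemeld/local/prototypes/minemeldlocal.yml', 'myProto'); any
# library other than 'minemeldlocal' raises ValueError.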
@BLUEPRINT.route('/prototype', methods=['GET'], read_write=False)
def list_prototypes():
paths = _prototype_paths()
prototypes = {}
for p in paths:
try:
for plibrary in os.listdir(p):
if not plibrary.endswith('.yml'):
continue
plibraryname, _ = plibrary.rsplit('.', 1)
with open(os.path.join(p, plibrary), 'r') as f:
pcontents = yaml.safe_load(f)
if plibraryname not in prototypes:
prototypes[plibraryname] = pcontents
continue
# prototypes from earlier (higher-priority) paths take precedence
newprotos = pcontents.get('prototypes', {})
currprotos = prototypes[plibraryname].get('prototypes', {})
newprotos.update(currprotos)
prototypes[plibraryname]['prototypes'] = newprotos
except:
LOG.exception('Error loading libraries from %s', p)
return jsonify(result=prototypes)
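# Illustrative library layout (hypothetical content): each <library>.yml file on
# the prototype path is expected to carry a 'prototypes' mapping, roughly
#
#   description: Example prototype library
#   author: someone
#   prototypes:
#       exampleMiner:
#           class: minemeld.ft.http.HttpFT
#           config: {}
#
# list_prototypes() merges files that share a basename; for duplicate prototype
# names, the copy from the earlier path on the search list wins.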
@BLUEPRINT.route('/prototype/<prototypename>', methods=['GET'], read_write=False)
def get_prototype(prototypename):
toks = prototypename.split('.', 1)
if len(toks) != 2:
return jsonify(error={'message': 'bad prototype name'}), 400
library, prototype = toks
if os.path.basename(library) != library:
return jsonify(error={'message': 'bad library name, nice try'}), 400
library_filename = library+'.yml'
paths = _prototype_paths()
for path in paths:
full_library_name = os.path.join(path, library_filename)
if not os.path.isfile(full_library_name):
continue
with open(full_library_name, 'r') as f:
library_contents = yaml.safe_load(f)
prototypes = library_contents.get('prototypes', None)
if prototypes is None:
continue
if prototype not in prototypes:
continue
curr_prototype = prototypes[prototype]
result = {
'class': curr_prototype['class'],
'developmentStatus': None,
'config': None,
'nodeType': None,
'description': None,
'indicatorTypes': None,
'tags': None
}
if 'config' in curr_prototype:
result['config'] = yaml.dump(
curr_prototype['config'],
indent=4,
default_flow_style=False
)
if 'development_status' in curr_prototype:
result['developmentStatus'] = curr_prototype['development_status']
if 'node_type' in curr_prototype:
result['nodeType'] = curr_prototype['node_type']
if 'description' in curr_prototype:
result['description'] = curr_prototype['description']
if 'indicator_types' in curr_prototype:
result['indicatorTypes'] = curr_prototype['indicator_types']
if 'tags' in curr_prototype:
result['tags'] = curr_prototype['tags']
return jsonify(result=result), 200
return jsonify(error={'message': 'unknown prototype'}), 400  # reached only when no library on the path defines the prototype
@BLUEPRINT.route('/prototype/<prototypename>', methods=['POST'], read_write=True)
def add_local_prototype(prototypename):
AUTHOR_ = 'minemeld-web'
DESCRIPTION_ = 'Local prototype library managed via MineMeld WebUI'
try:
library_path, prototype = _local_library_path(prototypename)
except ValueError as e:
return jsonify(error={'message': str(e)}), 400
lock = filelock.FileLock('{}.lock'.format(library_path))
with lock.acquire(timeout=10):
if os.path.isfile(library_path):
with open(library_path, 'r') as f:
library_contents = yaml.safe_load(f)
if not isinstance(library_contents, dict):
library_contents = {}
if 'description' not in library_contents:
library_contents['description'] = DESCRIPTION_
if 'prototypes' not in library_contents:
library_contents['prototypes'] = {}
if 'author' not in library_contents:
library_contents['author'] = AUTHOR_
else:
library_contents = {
'author': AUTHOR_,
'description': DESCRIPTION_,
'prototypes': {}
}
try:
incoming_prototype = request.get_json()
except Exception as e:
return jsonify(error={'message': str(e)}), 400
new_prototype = {
'class': incoming_prototype['class'],
}
if 'config' in incoming_prototype:
try:
new_prototype['config'] = yaml.safe_load(
incoming_prototype['config']
)
except Exception as e:
return jsonify(error={'message': 'invalid YAML in config'}), 400
if 'developmentStatus' in incoming_prototype:
new_prototype['development_status'] = \
incoming_prototype['developmentStatus']
if 'nodeType' in incoming_prototype:
new_prototype['node_type'] = incoming_prototype['nodeType']
if 'description' in incoming_prototype:
new_prototype['description'] = incoming_prototype['description']
if 'indicatorTypes' in incoming_prototype:
new_prototype['indicator_types'] = incoming_prototype['indicatorTypes']
if 'tags' in incoming_prototype:
new_prototype['tags'] = incoming_prototype['tags']
library_contents['prototypes'][prototype] = new_prototype
with open(library_path, 'w') as f:
yaml.safe_dump(library_contents, f, indent=4, default_flow_style=False)
return jsonify(result='OK'), 200
@BLUEPRINT.route('/prototype/<prototypename>', methods=['DELETE'], read_write=True)
def delete_local_prototype(prototypename):
try:
library_path, prototype = _local_library_path(prototypename)
except ValueError as e:
return jsonify(error={'message': str(e)}), 400
if not os.path.isfile(library_path):
return jsonify(error={'message': 'missing local prototype library'}), 400
# check if the proto is in use in running or committed config
rcconfig = running_config()
for nodename, nodevalue in rcconfig.get('nodes', {}).iteritems():
if 'prototype' not in nodevalue:
continue
if nodevalue['prototype'] == prototypename:
return jsonify(error={'message': 'prototype in use in running config'}), 400
ccconfig = committed_config()
for nodename, nodevalue in ccconfig.get('nodes', {}).iteritems():
if 'prototype' not in nodevalue:
continue
if nodevalue['prototype'] == prototypename:
return jsonify(error={'message': 'prototype in use in committed config'}), 400
lock = filelock.FileLock('{}.lock'.format(library_path))
with lock.acquire(timeout=10):
with open(library_path, 'r') as f:
library_contents = yaml.safe_load(f)
if not isinstance(library_contents, dict):
return jsonify(error={'message': 'invalid local prototype library'}), 400
library_contents['prototypes'].pop(prototype, None)
with open(library_path, 'w') as f:
yaml.safe_dump(library_contents, f, indent=4, default_flow_style=False)
return jsonify(result='OK'), 200
def reset_prototype_paths():
global PROTOTYPE_PATHS
PROTOTYPE_PATHS = None
|
|
###
# Copyright (c) 2004, Brett Kelly
# Copyright (c) 2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import time
import operator
import supybot.dbi as dbi
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot import commands
class NoteRecord(dbi.Record):
__fields__ = [
'frm',
'to',
'at',
'notified',
'read',
'public',
'text',
]
class DbiNoteDB(dbi.DB):
Mapping = 'flat'
Record = NoteRecord
def __init__(self, *args, **kwargs):
dbi.DB.__init__(self, *args, **kwargs)
self.unRead = {}
self.unNotified = {}
for record in self:
self._addCache(record)
def _addCache(self, record):
if not record.read:
self.unRead.setdefault(record.to, []).append(record.id)
if not record.notified:
self.unNotified.setdefault(record.to, []).append(record.id)
def _removeCache(self, record):
if record.notified:
try:
self.unNotified[record.to].remove(record.id)
except (KeyError, ValueError):
pass
if record.read:
try:
self.unRead[record.to].remove(record.id)
except (KeyError, ValueError):
pass
def setRead(self, id):
n = self.get(id)
n.read = True
n.notified = True
self._removeCache(n)
self.set(id, n)
def setNotified(self, id):
n = self.get(id)
n.notified = True
self._removeCache(n)
self.set(id, n)
def getUnnotifiedIds(self, to):
return self.unNotified.get(to, [])
def getUnreadIds(self, to):
return self.unRead.get(to, [])
def send(self, frm, to, public, text):
n = self.Record(frm=frm, to=to, text=text,
at=time.time(), public=public)
id = self.add(n)
self._addCache(n)
return id
def unsend(self, id):
self.remove(id)
for cache in self.unRead, self.unNotified:
for (to, ids) in cache.items():
while id in ids:
ids.remove(id)
NoteDB = plugins.DB('Note', {'flat': DbiNoteDB})
class Note(callbacks.Plugin):
def __init__(self, irc):
self.__parent = super(Note, self)
self.__parent.__init__(irc)
self.db = NoteDB()
def die(self):
self.__parent.die()
self.db.close()
def doPrivmsg(self, irc, msg):
if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
return
self._notify(irc, msg)
def doJoin(self, irc, msg):
if self.registryValue('notify.onJoin'):
repeatedly = self.registryValue('notify.onJoin.repeatedly')
self._notify(irc, msg, repeatedly)
def _notify(self, irc, msg, repeatedly=False):
irc = callbacks.SimpleProxy(irc, msg)
try:
to = ircdb.users.getUserId(msg.prefix)
except KeyError:
return
ids = self.db.getUnnotifiedIds(to)
if len(ids) <= self.registryValue('notify.autoSend'):
for id in ids:
irc.reply(self._formatNote(self.db.get(id), to), private=True)
self.db.setRead(id)
return
unnotifiedIds = ['#%s' % nid for nid in ids]
unnotified = len(unnotifiedIds)
if unnotified or repeatedly:
unreadIds = ['#%s' % nid for nid in self.db.getUnreadIds(to)]
unread = len(unreadIds)
s = format('You have %n; %i that I haven\'t told you about '
'before now. %L %b still unread.',
(unread, 'unread', 'note'), unnotified,
unreadIds, unread)
# Later we'll have a user value for allowing this to be a NOTICE.
irc.reply(s, private=True)
for nid in unnotifiedIds:
id = int(nid[1:])
self.db.setNotified(id)
def _getUserId(self, irc, name):
if ircdb.users.hasUser(name):
return ircdb.users.getUserId(name)
else:
try:
hostmask = irc.state.nickToHostmask(name)
return ircdb.users.getUserId(hostmask)
except KeyError:
return None
def send(self, irc, msg, args, user, targets, text):
"""<recipient>,[<recipient>,[...]] <text>
Sends a new note to the user specified. Multiple recipients may be
specified by separating their names by commas.
"""
# Let's get the from user.
public = irc.isChannel(msg.args[0])
sent = []
for target in targets:
id = self.db.send(user.id, target.id, public, text)
s = format('note #%i sent to %s', id, target.name)
sent.append(s)
irc.reply(format('%L.', sent).capitalize())
send = wrap(send, ['user', commalist('otherUser'), 'text'])
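# Illustrative IRC usage (hypothetical nicks): "note send alice,bob lunch at noon?"
# stores one note per recipient and replies with the assigned ids, e.g.
# "Note #1 sent to alice and note #2 sent to bob."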
def reply(self, irc, msg, args, user, id, text):
"""<id> <text>
Sends a note in reply to <id>.
"""
try:
note = self.db.get(id)
except dbi.NoRecordError:
irc.error('That\'s not a note in my database.', Raise=True)
if note.to != user.id:
irc.error('You may only reply to notes '
'that have been sent to you.', Raise=True)
self.db.setRead(id)
text += ' (in reply to #%s)' % id
public = irc.isChannel(msg.args[0])
try:
target = ircdb.users.getUser(note.frm)
except KeyError:
irc.error('The user who sent you that note '
'is no longer in my user database.', Raise=True)
id = self.db.send(user.id, note.frm, public, text)
irc.reply(format('Note #%i sent to %s.', id, target.name))
reply = wrap(reply, ['user', ('id', 'note'), 'text'])
def unsend(self, irc, msg, args, user, id):
"""<id>
Unsends the note with the id given. You must be the
author of the note, and it must be unread.
"""
try:
note = self.db.get(id)
except dbi.NoRecordError:
irc.errorInvalid('note id')
if note.frm == user.id:
if not note.read:
self.db.unsend(id)
irc.replySuccess()
else:
irc.error('That note has been read already.')
else:
irc.error('That note wasn\'t sent by you.')
unsend = wrap(unsend, ['user', ('id', 'note')])
def _formatNote(self, note, to):
elapsed = utils.timeElapsed(time.time() - note.at)
if note.to == to:
author = plugins.getUserName(note.frm)
return format('#%i: %s (Sent by %s %s ago)',
note.id, note.text, author, elapsed)
else:
assert note.frm == to, 'Odd, userid isn\'t frm either.'
recipient = plugins.getUserName(note.to)
return format('#%i: %s (Sent to %s %s ago)',
note.id, note.text, recipient, elapsed)
def note(self, irc, msg, args, user, id):
"""<id>
Retrieves a single note by its unique note id. Use the 'note list'
command to see what unread notes you have.
"""
try:
note = self.db.get(id)
except dbi.NoRecordError:
irc.errorInvalid('note id')
if user.id != note.frm and user.id != note.to:
s = 'You may only retrieve notes you\'ve sent or received.'
irc.error(s)
return
newnote = self._formatNote(note, user.id)
irc.reply(newnote, private=(not note.public))
self.db.setRead(id)
note = wrap(note, ['user', ('id', 'note')])
def _formatNoteId(self, msg, note, sent=False):
if note.public or not ircutils.isChannel(msg.args[0]):
if sent:
sender = plugins.getUserName(note.to)
return format('#%i to %s', note.id, sender)
else:
sender = plugins.getUserName(note.frm)
return format('#%i from %s', note.id, sender)
else:
return format('#%i (private)', note.id)
def search(self, irc, msg, args, user, optlist, glob):
"""[--{regexp} <value>] [--sent] [<glob>]
Searches your received notes for ones matching <glob>. If --regexp is
given, its associated value is taken as a regexp and matched against
the notes. If --sent is specified, only search sent notes.
"""
criteria = []
def to(note):
return note.to == user.id
def frm(note):
return note.frm == user.id
own = to
for (option, arg) in optlist:
if option == 'regexp':
criteria.append(lambda x: commands.regexp_wrapper(x, reobj=arg,
timeout=0.1, plugin_name = self.name(), fcn_name='search'))
elif option == 'sent':
own = frm
if glob:
glob = utils.python.glob2re(glob)
criteria.append(re.compile(glob).search)
def match(note):
for p in criteria:
if not p(note.text):
return False
return True
notes = list(self.db.select(lambda n: match(n) and own(n)))
if not notes:
irc.reply('No matching notes were found.')
else:
utils.sortBy(operator.attrgetter('id'), notes)
ids = [self._formatNoteId(msg, note) for note in notes]
ids = self._condense(ids)
irc.reply(format('%L', ids))
search = wrap(search,
['user', getopts({'regexp': ('regexpMatcher', True),
'sent': ''}),
additional('glob')])
def list(self, irc, msg, args, user, optlist):
"""[--{old,sent}] [--{from,to} <user>]
Retrieves the ids of all your unread notes. If --old is given, list
read notes. If --sent is given, list notes that you have sent. If
--from is specified, only lists notes sent to you from <user>. If
--to is specified, only lists notes sent by you to <user>.
"""
(sender, receiver, old, sent) = (None, None, False, False)
for (option, arg) in optlist:
if option == 'old':
old = True
if option == 'sent':
sent = True
if option == 'from':
sender = arg
if option == 'to':
receiver = arg
sent = True
if old:
return self._oldnotes(irc, msg, sender)
if sent:
return self._sentnotes(irc, msg, receiver)
def p(note):
return not note.read and note.to == user.id
if sender:
originalP = p
def p(note):
return originalP(note) and note.frm == sender.id
notes = list(self.db.select(p))
if not notes:
irc.reply('You have no unread notes.')
else:
utils.sortBy(operator.attrgetter('id'), notes)
ids = [self._formatNoteId(msg, note) for note in notes]
ids = self._condense(ids)
irc.reply(format('%L.', ids))
list = wrap(list, ['user', getopts({'old': '', 'sent': '',
'from': 'otherUser',
'to': 'otherUser'})])
def next(self, irc, msg, args, user):
"""takes no arguments
Retrieves your next unread note, if any.
"""
notes = self.db.getUnreadIds(user.id)
if not notes:
irc.reply('You have no unread notes.')
else:
found = False
for id in notes:
try:
note = self.db.get(id)
except KeyError:
continue
found = True
break
if not found:
irc.reply('You have no unread notes.')
else:
irc.reply(self._formatNote(note, user.id), private=(not note.public))
self.db.setRead(note.id)
next = wrap(next, ['user'])
def _condense(self, notes):
temp = {}
for note in notes:
note = note.split(' ', 1)
if note[1] in temp:
temp[note[1]].append(note[0])
else:
temp[note[1]] = [note[0]]
notes = []
for (k,v) in temp.iteritems():
if '(private)' in k:
k = k.replace('(private)', format('%b private', len(v)))
notes.append(format('%L %s', v, k))
return notes
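# Sketch of the condensing behaviour (hypothetical input): given
# ['#1 from alice', '#2 from alice', '#3 (private)'], ids sharing a suffix are
# grouped, yielding roughly ['#1 and #2 from alice', '#3 is private'] once
# supybot's %L list and %b is/are expansions are applied.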
def _sentnotes(self, irc, msg, receiver):
try:
user = ircdb.users.getUser(msg.prefix)
except KeyError:
irc.errorNotRegistered()
return
def p(note):
return note.frm == user.id
if receiver:
originalP = p
def p(note):
return originalP(note) and note.to == receiver.id
notes = list(self.db.select(p))
if not notes:
irc.error('I couldn\'t find any sent notes for your user.')
else:
utils.sortBy(operator.attrgetter('id'), notes)
notes.reverse() # Most recently sent first.
ids = [self._formatNoteId(msg, note, sent=True) for note in notes]
ids = self._condense(ids)
irc.reply(format('%L.', ids))
def _oldnotes(self, irc, msg, sender):
try:
user = ircdb.users.getUser(msg.prefix)
except KeyError:
irc.errorNotRegistered()
return
def p(note):
return note.to == user.id and note.read
if sender:
originalP = p
def p(note):
return originalP(note) and note.frm == sender.id
notes = list(self.db.select(p))
if not notes:
irc.reply('I couldn\'t find any matching read notes '
'for your user.')
else:
utils.sortBy(operator.attrgetter('id'), notes)
notes.reverse()
ids = [self._formatNoteId(msg, note) for note in notes]
ids = self._condense(ids)
irc.reply(format('%L.', ids))
Class = Note
# vim: shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
import os.path as op
import gc
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution, SourceEstimate, pick_types_forward,
read_evokeds, VectorSourceEstimate)
from mne.io import read_info
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess,
run_tests_if_main)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward, is_fixed_orient, compute_orient_prior,
compute_depth_prior)
from mne.channels import equalize_channels
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
def assert_forward_allclose(f1, f2, rtol=1e-7):
"""Compare two potentially converted forward solutions."""
assert_allclose(f1['sol']['data'], f2['sol']['data'], rtol=rtol)
assert f1['sol']['ncol'] == f2['sol']['ncol']
assert f1['sol']['ncol'] == f1['sol']['data'].shape[1]
assert_allclose(f1['source_nn'], f2['source_nn'], rtol=rtol)
if f1['sol_grad'] is not None:
assert (f2['sol_grad'] is not None)
assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
assert f1['sol_grad']['ncol'] == f2['sol_grad']['ncol']
assert f1['sol_grad']['ncol'] == f1['sol_grad']['data'].shape[1]
else:
assert (f2['sol_grad'] is None)
assert f1['source_ori'] == f2['source_ori']
assert f1['surf_ori'] == f2['surf_ori']
assert f1['src'][0]['coord_frame'] == f2['src'][0]['coord_frame']
@testing.requires_testing_data
def test_convert_forward():
"""Test converting forward solution between different representations."""
fwd = read_forward_solution(fname_meeg_grad)
fwd_repr = repr(fwd)
assert ('306' in fwd_repr)
assert ('60' in fwd_repr)
assert (fwd_repr)
assert (isinstance(fwd, Forward))
# look at surface orientation
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
# go back
fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
assert (repr(fwd_new))
assert (isinstance(fwd_new, Forward))
assert_forward_allclose(fwd, fwd_new)
del fwd_new
gc.collect()
# now go to fixed
fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=True,
force_fixed=True, use_cps=False)
del fwd_surf
gc.collect()
assert (repr(fwd_fixed))
assert (isinstance(fwd_fixed, Forward))
assert (is_fixed_orient(fwd_fixed))
# now go back to cartesian (original condition)
fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False,
force_fixed=False)
assert (repr(fwd_new))
assert (isinstance(fwd_new, Forward))
assert_forward_allclose(fwd, fwd_new)
del fwd, fwd_new, fwd_fixed
gc.collect()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_io_forward(tmpdir):
"""Test IO for forward solutions."""
# do extensive tests with MEEG + grad
n_channels, n_src = 366, 108
fwd = read_forward_solution(fname_meeg_grad)
assert (isinstance(fwd, Forward))
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd['sol']['row_names']), n_channels)
fname_temp = tmpdir.join('test-fwd.fif')
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True)
leadfield = fwd_read['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd_read['sol']['row_names']), n_channels)
assert_equal(len(fwd_read['info']['chs']), n_channels)
assert ('dev_head_t' in fwd_read['info'])
assert ('mri_head_t' in fwd_read)
assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=False)
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=False)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, 1494 / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert ('dev_head_t' in fwd['info'])
assert ('mri_head_t' in fwd)
assert (fwd['surf_ori'])
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=True)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert ('dev_head_t' in fwd['info'])
assert ('mri_head_t' in fwd)
assert (fwd['surf_ori'])
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=True)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
# test warnings on bad filenames
fwd = read_forward_solution(fname_meeg_grad)
fwd_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='end with'):
write_forward_solution(fwd_badname, fwd)
with pytest.warns(RuntimeWarning, match='end with'):
read_forward_solution(fwd_badname)
fwd = read_forward_solution(fname_meeg)
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
assert_forward_allclose(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
"""Test projection of source space data to sensor space."""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
assert isinstance(fwd, Forward)
vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
gain_sum = np.sum(fwd['sol']['data'], axis=1)
# Evoked
evoked = read_evokeds(fname_evoked, condition=0)
evoked.pick_types(meg=True)
with pytest.warns(RuntimeWarning, match='only .* positive values'):
evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
data = evoked.data
times = evoked.times
# do some tests
assert_array_almost_equal(evoked.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
# vector
stc_vec = VectorSourceEstimate(
fwd['source_nn'][:, :, np.newaxis] * stc.data[:, np.newaxis],
stc.vertices, stc.tmin, stc.tstep)
with pytest.warns(RuntimeWarning, match='very large'):
evoked_2 = apply_forward(fwd, stc_vec, evoked.info)
assert np.abs(evoked_2.data).mean() > 1e-5
assert_allclose(evoked.data, evoked_2.data, atol=1e-10)
# Raw
with pytest.warns(RuntimeWarning, match='only .* positive values'):
raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start,
stop=stop)
data, times = raw_proj[:, :]
# do some tests
assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
atol = 1. / sfreq
assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
assert_allclose(raw_proj.last_samp / sfreq,
t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc(tmpdir):
"""Test restriction of source space to source SourceEstimate."""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert (isinstance(fwd_out, Forward))
assert_equal(fwd_out['sol']['ncol'], 20)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_equal(fwd_out['sol']['ncol'], 60)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
# Test saving the restricted forward object. This only works if all fields
# are properly accounted for.
fname_copy = tmpdir.join('copy-fwd.fif')
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_copy, fwd_out, overwrite=True)
fwd_out_read = read_forward_solution(fname_copy)
fwd_out_read = convert_forward_solution(fwd_out_read, surf_ori=True,
force_fixed=False)
assert_forward_allclose(fwd_out, fwd_out_read)
@testing.requires_testing_data
def test_restrict_forward_to_label(tmpdir):
"""Test restriction of source space to label."""
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
nuse_lh = fwd['src'][0]['nuse']
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
src_sel_rh += nuse_lh
assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
fwd = read_forward_solution(fname_meeg)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
nuse_lh = fwd['src'][0]['nuse']
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
src_sel_rh += nuse_lh
assert_equal(fwd_out['sol']['ncol'],
3 * (len(src_sel_lh) + len(src_sel_rh)))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
# Test saving the restricted forward object. This only works if all fields
# are properly accounted for.
fname_copy = tmpdir.join('copy-fwd.fif')
write_forward_solution(fname_copy, fwd_out, overwrite=True)
fwd_out_read = read_forward_solution(fname_copy)
assert_forward_allclose(fwd_out, fwd_out_read)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution(tmpdir):
"""Test averaging forward solutions."""
fwd = read_forward_solution(fname_meeg)
# input not a list
pytest.raises(TypeError, average_forward_solutions, 1)
# list is too short
pytest.raises(ValueError, average_forward_solutions, [])
# negative weights
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
# all zero weights
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
# weights not same length
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
# list does not only have all dict()
pytest.raises(TypeError, average_forward_solutions, [1, fwd])
# try an easy case
fwd_copy = average_forward_solutions([fwd])
assert (isinstance(fwd_copy, Forward))
assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
# modify a fwd solution, save it, use MNE to average with old one
fwd_copy['sol']['data'] *= 0.5
fname_copy = str(tmpdir.join('copy-fwd.fif'))
write_forward_solution(fname_copy, fwd_copy, overwrite=True)
cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
fname_copy, '--out', fname_copy)
run_subprocess(cmd)
# now let's actually do it, with one filename and one fwd
fwd_ave = average_forward_solutions([fwd, fwd_copy])
assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
# fwd_ave_mne = read_forward_solution(fname_copy)
# assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
# with gradient
fwd = read_forward_solution(fname_meeg_grad)
fwd_ave = average_forward_solutions([fwd, fwd])
assert_forward_allclose(fwd, fwd_ave)
@testing.requires_testing_data
def test_priors():
"""Test prior computations."""
# Depth prior
fwd = read_forward_solution(fname_meeg)
assert not is_fixed_orient(fwd)
n_sources = fwd['nsource']
info = read_info(fname_evoked)
depth_prior = compute_depth_prior(fwd, info, exp=0.8)
assert depth_prior.shape == (3 * n_sources,)
depth_prior = compute_depth_prior(fwd, info, exp=0.)
assert_array_equal(depth_prior, 1.)
with pytest.raises(ValueError, match='must be "whiten"'):
compute_depth_prior(fwd, info, limit_depth_chs='foo')
with pytest.raises(ValueError, match='noise_cov must be a Covariance'):
compute_depth_prior(fwd, info, limit_depth_chs='whiten')
fwd_fixed = convert_forward_solution(fwd, force_fixed=True)
depth_prior = compute_depth_prior(fwd_fixed, info=info)
assert depth_prior.shape == (n_sources,)
# Orientation prior
orient_prior = compute_orient_prior(fwd, 1.)
assert_array_equal(orient_prior, 1.)
orient_prior = compute_orient_prior(fwd_fixed, 0.)
assert_array_equal(orient_prior, 1.)
with pytest.raises(ValueError, match='oriented in surface coordinates'):
compute_orient_prior(fwd, 0.5)
fwd_surf_ori = convert_forward_solution(fwd, surf_ori=True)
orient_prior = compute_orient_prior(fwd_surf_ori, 0.5)
assert all(np.in1d(orient_prior, (0.5, 1.)))
with pytest.raises(ValueError, match='between 0 and 1'):
compute_orient_prior(fwd_surf_ori, -0.5)
with pytest.raises(ValueError, match='with fixed orientation'):
compute_orient_prior(fwd_fixed, 0.5)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels for instances of Forward."""
fwd1 = read_forward_solution(fname_meeg)
fwd1.pick_channels(['EEG 001', 'EEG 002', 'EEG 003'])
fwd2 = fwd1.copy().pick_channels(['EEG 002', 'EEG 001'], ordered=True)
fwd1, fwd2 = equalize_channels([fwd1, fwd2])
assert fwd1.ch_names == ['EEG 001', 'EEG 002']
assert fwd2.ch_names == ['EEG 001', 'EEG 002']
run_tests_if_main()
|
|
#!/usr/bin/python
#
# aes.py: implements AES - Advanced Encryption Standard
# from the SlowAES project, http://code.google.com/p/slowaes/
#
# Copyright (c) 2008 Josh Davis ( http://www.josh-davis.org ),
# Alex Martelli ( http://www.aleax.it )
#
# Ported from C code written by Laurent Haan ( http://www.progressive-coding.com )
#
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/
#
import os
import sys
import math
class AES(object):
# valid key sizes
keySize = dict(SIZE_128=16, SIZE_192=24, SIZE_256=32)
# valid expanded key sizes
ekeySize = dict(SIZE_128=176, SIZE_192=208, SIZE_256=240)
# Rijndael S-box
sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67,
0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59,
0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7,
0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1,
0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05,
0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83,
0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29,
0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa,
0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc,
0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19,
0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,
0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49,
0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4,
0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70,
0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e,
0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1,
0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0,
0x54, 0xbb, 0x16]
# Rijndael Inverted S-box
rsbox = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3,
0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f,
0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54,
0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b,
0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24,
0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8,
0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d,
0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab,
0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3,
0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1,
0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6,
0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9,
0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d,
0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0,
0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07,
0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60,
0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f,
0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5,
0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b,
0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55,
0x21, 0x0c, 0x7d]
# Rijndael Rcon
Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97,
0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72,
0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66,
0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d,
0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61,
0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc,
0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5,
0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a,
0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d,
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c,
0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4,
0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08,
0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d,
0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2,
0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74,
0xe8, 0xcb]
def getSBoxValue(self,num):
"""Retrieves a given S-Box Value"""
return self.sbox[num]
def getSBoxInvert(self,num):
"""Retrieves a given Inverted S-Box Value"""
return self.rsbox[num]
def rotate(self, word):
""" Rijndael's key schedule rotate operation.
Rotate a word eight bits to the left: eg, rotate(1d2c3a4f) == 2c3a4f1d
        Word is a char list of size 4 (32 bits overall).
"""
return word[1:] + word[:1]
def getRconValue(self, num):
"""Retrieves a given Rcon Value"""
return self.Rcon[num]
def core(self, word, iteration):
"""Key schedule core."""
# rotate the 32-bit word 8 bits to the left
word = self.rotate(word)
# apply S-Box substitution on all 4 parts of the 32-bit word
for i in range(4):
word[i] = self.getSBoxValue(word[i])
# XOR the output of the rcon operation with i to the first part
# (leftmost) only
word[0] = word[0] ^ self.getRconValue(iteration)
return word
def expandKey(self, key, size, expandedKeySize):
"""Rijndael's key expansion.
        Expands a 128/192/256-bit key into a 176/208/240-byte expanded key.
        key is the non-expanded key (a list of byte values), size is its
        length in bytes, and expandedKeySize is the desired length of the
        expanded key in bytes.
"""
# current expanded keySize, in bytes
currentSize = 0
rconIteration = 1
expandedKey = [0] * expandedKeySize
        # set the first 16, 24 or 32 bytes of the expanded key to the input key
for j in range(size):
expandedKey[j] = key[j]
currentSize += size
while currentSize < expandedKeySize:
# assign the previous 4 bytes to the temporary value t
t = expandedKey[currentSize-4:currentSize]
# every 16,24,32 bytes we apply the core schedule to t
# and increment rconIteration afterwards
if currentSize % size == 0:
t = self.core(t, rconIteration)
rconIteration += 1
# For 256-bit keys, we add an extra sbox to the calculation
if size == self.keySize["SIZE_256"] and ((currentSize % size) == 16):
for l in range(4): t[l] = self.getSBoxValue(t[l])
# We XOR t with the four-byte block 16,24,32 bytes before the new
# expanded key. This becomes the next four bytes in the expanded
# key.
for m in range(4):
expandedKey[currentSize] = expandedKey[currentSize - size] ^ \
t[m]
currentSize += 1
return expandedKey
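    # expandKey worked example: an AES-128 key (16 bytes) expands to 11 round
    # keys of 16 bytes each, i.e. 176 bytes; AES-192 gives 13 * 16 = 208 bytes
    # and AES-256 gives 15 * 16 = 240 bytes, matching ekeySize above.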
def addRoundKey(self, state, roundKey):
"""Adds (XORs) the round key to the state."""
for i in range(16):
state[i] ^= roundKey[i]
return state
def createRoundKey(self, expandedKey, roundKeyPointer):
"""Create a round key.
Creates a round key from the given expanded key and the
position within the expanded key.
"""
roundKey = [0] * 16
for i in range(4):
for j in range(4):
roundKey[j * 4 + i] = expandedKey[roundKeyPointer + i * 4 + j]
return roundKey
def galois_multiplication(self, a, b):
"""Galois multiplication of 8 bit characters a and b."""
p = 0
for counter in range(8):
if b & 1: p ^= a
hi_bit_set = a & 0x80
a <<= 1
            # keep a within 8 bits
a &= 0xFF
if hi_bit_set:
a ^= 0x1b
b >>= 1
return p
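    # Worked example for galois_multiplication (the familiar multiplication
    # test vector from FIPS-197): {57} * {83} = {c1} in GF(2^8) with the AES
    # reduction polynomial x^8 + x^4 + x^3 + x + 1, i.e.
    # galois_multiplication(0x57, 0x83) == 0xc1.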
#
# substitute all the values from the state with the value in the SBox
# using the state value as index for the SBox
#
def subBytes(self, state, isInv):
if isInv: getter = self.getSBoxInvert
else: getter = self.getSBoxValue
for i in range(16): state[i] = getter(state[i])
return state
# iterate over the 4 rows and call shiftRow() with that row
def shiftRows(self, state, isInv):
for i in range(4):
state = self.shiftRow(state, i * 4, i, isInv)
return state
    # shifts row nbr one position per iteration (left for encryption, right for the inverse)
def shiftRow(self, state, statePointer, nbr, isInv):
for i in range(nbr):
if isInv:
state[statePointer:statePointer+4] = \
state[statePointer+3:statePointer+4] + \
state[statePointer:statePointer+3]
else:
state[statePointer:statePointer+4] = \
state[statePointer+1:statePointer+4] + \
state[statePointer:statePointer+1]
return state
# galois multiplication of the 4x4 matrix
def mixColumns(self, state, isInv):
# iterate over the 4 columns
for i in range(4):
# construct one column by slicing over the 4 rows
column = state[i:i+16:4]
# apply the mixColumn on one column
column = self.mixColumn(column, isInv)
# put the values back into the state
state[i:i+16:4] = column
return state
# galois multiplication of 1 column of the 4x4 matrix
def mixColumn(self, column, isInv):
if isInv: mult = [14, 9, 13, 11]
else: mult = [2, 1, 1, 3]
cpy = list(column)
g = self.galois_multiplication
column[0] = g(cpy[0], mult[0]) ^ g(cpy[3], mult[1]) ^ \
g(cpy[2], mult[2]) ^ g(cpy[1], mult[3])
column[1] = g(cpy[1], mult[0]) ^ g(cpy[0], mult[1]) ^ \
g(cpy[3], mult[2]) ^ g(cpy[2], mult[3])
column[2] = g(cpy[2], mult[0]) ^ g(cpy[1], mult[1]) ^ \
g(cpy[0], mult[2]) ^ g(cpy[3], mult[3])
column[3] = g(cpy[3], mult[0]) ^ g(cpy[2], mult[1]) ^ \
g(cpy[1], mult[2]) ^ g(cpy[0], mult[3])
return column
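    # The mult lists in mixColumn above are the first rows of the MixColumns
    # matrices: encryption multiplies each column by the circulant matrix
    #   [2 3 1 1; 1 2 3 1; 1 1 2 3; 3 1 1 2]
    # over GF(2^8), and decryption by its inverse
    #   [14 11 13 9; 9 14 11 13; 13 9 14 11; 11 13 9 14].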
# applies the 4 operations of the forward round in sequence
def aes_round(self, state, roundKey):
state = self.subBytes(state, False)
state = self.shiftRows(state, False)
state = self.mixColumns(state, False)
state = self.addRoundKey(state, roundKey)
return state
# applies the 4 operations of the inverse round in sequence
def aes_invRound(self, state, roundKey):
state = self.shiftRows(state, True)
state = self.subBytes(state, True)
state = self.addRoundKey(state, roundKey)
state = self.mixColumns(state, True)
return state
# Perform the initial operations, the standard round, and the final
# operations of the forward aes, creating a round key for each round
def aes_main(self, state, expandedKey, nbrRounds):
state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
i = 1
while i < nbrRounds:
state = self.aes_round(state,
self.createRoundKey(expandedKey, 16*i))
i += 1
state = self.subBytes(state, False)
state = self.shiftRows(state, False)
state = self.addRoundKey(state,
self.createRoundKey(expandedKey, 16*nbrRounds))
return state
# Perform the initial operations, the standard round, and the final
# operations of the inverse aes, creating a round key for each round
def aes_invMain(self, state, expandedKey, nbrRounds):
state = self.addRoundKey(state,
self.createRoundKey(expandedKey, 16*nbrRounds))
i = nbrRounds - 1
while i > 0:
state = self.aes_invRound(state,
self.createRoundKey(expandedKey, 16*i))
i -= 1
state = self.shiftRows(state, True)
state = self.subBytes(state, True)
state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
return state
    # encrypts a single 128-bit input block with the given expanded key
def encrypt(self, iput, expandedKey, nbrRounds):
output = [0] * 16
# the 128 bit block to encode
block = [0] * 16
for i in range(4):
# iterate over the rows
for j in range(4):
block[(i+(j*4))] = iput[(i*4)+j]
# encrypt the block using the expandedKey
block = self.aes_main(block, expandedKey, nbrRounds)
# unmap the block again into the output
for k in range(4):
# iterate over the rows
for l in range(4):
output[(k*4)+l] = block[(k+(l*4))]
return output
    # decrypts a single 128-bit input block with the given expanded key
def decrypt(self, iput, expandedKey, nbrRounds):
output = [0] * 16
# the 128 bit block to decode
block = [0] * 16
# iterate over the columns
for i in range(4):
# iterate over the rows
for j in range(4):
block[(i+(j*4))] = iput[(i*4)+j]
# decrypt the block using the expandedKey
block = self.aes_invMain(block, expandedKey, nbrRounds)
# unmap the block again into the output
for k in range(4):
# iterate over the rows
for l in range(4):
output[(k*4)+l] = block[(k+(l*4))]
return output
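# Minimal usage sketch (illustrative only; the variable names below are
# arbitrary): expand a 128-bit key and check that a single 16-byte block
# round-trips through encrypt()/decrypt() with the 10 rounds used by AES-128.
if __name__ == "__main__":
    _aes = AES()
    _key = list(range(16))               # a hypothetical 128-bit key
    _block = [0x61] * 16                 # a hypothetical 16-byte plaintext block
    _expanded = _aes.expandKey(_key, _aes.keySize["SIZE_128"],
                               _aes.ekeySize["SIZE_128"])
    _cipher = _aes.encrypt(_block, _expanded, 10)
    assert _aes.decrypt(_cipher, _expanded, 10) == _block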
|
|
"""Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import Camera, Light
from pyunifiprotect.data.types import RecordingMode, VideoMode
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
ALL_DEVICES_SWITCHES,
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
@pytest.fixture(name="light")
async def light_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
"""Fixture for a single light for testing the switch platform."""
# disable pydantic validation so mocking can happen
Light.__config__.validate_assignment = False
light_obj = mock_light.copy(deep=True)
light_obj._api = mock_entry.api
light_obj.name = "Test Light"
light_obj.is_ssh_enabled = False
light_obj.light_device_settings.is_indicator_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.lights = {
light_obj.id: light_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 2, 1)
yield light_obj
Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = True
camera_obj.feature_flags.has_hdr = True
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = True
camera_obj.feature_flags.has_smart_detect = True
camera_obj.is_ssh_enabled = False
camera_obj.led_settings.is_enabled = False
camera_obj.hdr_mode = False
camera_obj.video_mode = VideoMode.DEFAULT
camera_obj.remove_privacy_zone()
camera_obj.speaker_settings.are_system_sounds_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
camera_obj.smart_detect_settings.object_types = []
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 12, 11)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = False
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 5, 4)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.NEVER
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.add_privacy_zone()
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 6, 5)
yield camera_obj
Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
light: Light,
):
"""Test switch entity setup for light devices."""
entity_registry = er.async_get(hass)
description = LIGHT_SWITCHES[0]
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, light, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = ALL_DEVICES_SWITCHES[0]
unique_id = f"{light.id}_{description.key}"
entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera: Camera,
):
"""Test switch entity setup for camera devices (all enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES:
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = ALL_DEVICES_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera_none: Camera,
):
"""Test switch entity setup for camera devices (no enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES:
if description.ufp_required_field is not None:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera_none, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = ALL_DEVICES_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera_none.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
"""Tests status light switch for lights."""
description = LIGHT_SWITCHES[0]
light.__fields__["set_status_light"] = Mock()
light.set_status_light = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, light, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
"""Tests SSH switch for cameras."""
description = ALL_DEVICES_SWITCHES[0]
camera.__fields__["set_ssh"] = Mock()
camera.set_ssh = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES)
async def test_switch_camera_simple(
hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
"""Tests all simple switches for cameras."""
if description.name in ("High FPS", "Privacy Mode"):
return
assert description.ufp_set_function is not None
camera.__fields__[description.ufp_set_function] = Mock()
setattr(camera, description.ufp_set_function, AsyncMock())
set_method = getattr(camera, description.ufp_set_function)
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
"""Tests High FPS switch for cameras."""
description = CAMERA_SWITCHES[2]
camera.__fields__["set_video_mode"] = Mock()
camera.set_video_mode = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
"""Tests Privacy Mode switch for cameras."""
description = CAMERA_SWITCHES[3]
camera.__fields__["set_privacy"] = Mock()
camera.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_with(
False, camera.mic_volume, camera.recording_settings.mode
)
async def test_switch_camera_privacy_already_on(
hass: HomeAssistant, camera_privacy: Camera
):
"""Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
description = CAMERA_SWITCHES[3]
camera_privacy.__fields__["set_privacy"] = Mock()
camera_privacy.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(
Platform.SWITCH, camera_privacy, description
)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
|
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class RolesNegativeTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
def _get_role_params(self):
self.data.setup_test_user()
self.data.setup_test_role()
user = self.get_user_by_name(self.data.test_user)
tenant = self.get_tenant_by_name(self.data.test_tenant)
role = self.get_role_by_name(self.data.test_role)
return (user, tenant, role)
@attr(type=['negative', 'gate'])
def test_list_roles_by_unauthorized_user(self):
# Non-administrator user should not be able to list roles
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_roles)
@attr(type=['negative', 'gate'])
def test_list_roles_request_without_token(self):
# Request to list roles without a valid token should fail
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.list_roles)
self.client.auth_provider.clear_auth()
@attr(type=['negative', 'gate'])
def test_role_create_blank_name(self):
# Should not be able to create a role with a blank name
self.assertRaises(exceptions.BadRequest, self.client.create_role, '')
@attr(type=['negative', 'gate'])
def test_create_role_by_unauthorized_user(self):
# Non-administrator user should not be able to create role
role_name = data_utils.rand_name(name='role-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_role, role_name)
@attr(type=['negative', 'gate'])
def test_create_role_request_without_token(self):
# Request to create role without a valid token should fail
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
role_name = data_utils.rand_name(name='role-')
self.assertRaises(exceptions.Unauthorized,
self.client.create_role, role_name)
self.client.auth_provider.clear_auth()
@attr(type=['negative', 'gate'])
def test_role_create_duplicate(self):
# Role names should be unique
role_name = data_utils.rand_name(name='role-dup-')
resp, body = self.client.create_role(role_name)
role1_id = body.get('id')
self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
self.addCleanup(self.client.delete_role, role1_id)
self.assertRaises(exceptions.Conflict, self.client.create_role,
role_name)
@attr(type=['negative', 'gate'])
def test_delete_role_by_unauthorized_user(self):
# Non-administrator user should not be able to delete role
role_name = data_utils.rand_name(name='role-')
resp, body = self.client.create_role(role_name)
self.assertEqual(200, resp.status)
self.data.roles.append(body)
role_id = body.get('id')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_role, role_id)
@attr(type=['negative', 'gate'])
def test_delete_role_request_without_token(self):
# Request to delete role without a valid token should fail
role_name = data_utils.rand_name(name='role-')
resp, body = self.client.create_role(role_name)
self.assertEqual(200, resp.status)
self.data.roles.append(body)
role_id = body.get('id')
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized,
self.client.delete_role,
role_id)
self.client.auth_provider.clear_auth()
@attr(type=['negative', 'gate'])
def test_delete_role_non_existent(self):
        # Attempting to delete a non-existent role should fail
non_existent_role = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.delete_role,
non_existent_role)
@attr(type=['negative', 'gate'])
def test_assign_user_role_by_unauthorized_user(self):
# Non-administrator user should not be authorized to
# assign a role to user
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.assign_user_role,
tenant['id'], user['id'], role['id'])
@attr(type=['negative', 'gate'])
def test_assign_user_role_request_without_token(self):
# Request to assign a role to a user without a valid token
(user, tenant, role) = self._get_role_params()
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized,
self.client.assign_user_role, tenant['id'],
user['id'], role['id'])
self.client.auth_provider.clear_auth()
@attr(type=['negative', 'gate'])
def test_assign_user_role_for_non_existent_role(self):
        # Attempting to assign a non-existent role to a user should fail
(user, tenant, role) = self._get_role_params()
non_existent_role = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
tenant['id'], user['id'], non_existent_role)
@attr(type=['negative', 'gate'])
def test_assign_user_role_for_non_existent_tenant(self):
        # Attempting to assign a role on a non-existent tenant should fail
(user, tenant, role) = self._get_role_params()
non_existent_tenant = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
non_existent_tenant, user['id'], role['id'])
@attr(type=['negative', 'gate'])
def test_assign_duplicate_user_role(self):
# Duplicate user role should not get assigned
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Conflict, self.client.assign_user_role,
tenant['id'], user['id'], role['id'])
@attr(type=['negative', 'gate'])
def test_remove_user_role_by_unauthorized_user(self):
# Non-administrator user should not be authorized to
# remove a user's role
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
role['id'])
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.remove_user_role,
tenant['id'], user['id'], role['id'])
@attr(type=['negative', 'gate'])
def test_remove_user_role_request_without_token(self):
# Request to remove a user's role without a valid token
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
role['id'])
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized,
self.client.remove_user_role, tenant['id'],
user['id'], role['id'])
self.client.auth_provider.clear_auth()
@attr(type=['negative', 'gate'])
def test_remove_user_role_non_existent_role(self):
        # Attempting to delete a non-existent role from a user should fail
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
role['id'])
non_existent_role = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
tenant['id'], user['id'], non_existent_role)
@attr(type=['negative', 'gate'])
def test_remove_user_role_non_existent_tenant(self):
        # Attempting to remove a role from a non-existent tenant should fail
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
role['id'])
non_existent_tenant = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
non_existent_tenant, user['id'], role['id'])
@attr(type=['negative', 'gate'])
def test_list_user_roles_by_unauthorized_user(self):
# Non-administrator user should not be authorized to list
# a user's roles
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_user_roles, tenant['id'],
user['id'])
@attr(type=['negative', 'gate'])
def test_list_user_roles_request_without_token(self):
# Request to list user's roles without a valid token should fail
(user, tenant, role) = self._get_role_params()
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
try:
self.assertRaises(exceptions.Unauthorized,
self.client.list_user_roles, tenant['id'],
user['id'])
finally:
self.client.auth_provider.clear_auth()
class RolesTestXML(RolesNegativeTestJSON):
_interface = 'xml'
|
|
#!/usr/bin/env python
import unittest
import lief
import tempfile
import sys
import subprocess
import stat
import os
import logging
import random
import itertools
from subprocess import Popen
from unittest import TestCase
from utils import get_sample
class TestMachO(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
def test_function_starts(self):
dd = lief.parse(get_sample('MachO/MachO64_x86-64_binary_dd.bin'))
functions = [
0x100001581, 0x1000016cc, 0x1000017cc,
0x1000019e3, 0x100001a03, 0x100001a1d,
0x1000020ad, 0x1000022f6, 0x1000023ef,
0x10000246b, 0x10000248c, 0x1000026da,
0x100002754, 0x10000286b, 0x100002914,
0x100002bd8, 0x100002be8, 0x100002c2b,
0x100002c62, 0x100002d24, 0x100002d5a,
0x100002d91, 0x100002dd5, 0x100002de6,
0x100002dfc, 0x100002e40, 0x100002e51,
0x100002e67, 0x100002f9e
]
self.assertEqual(dd.function_starts.data_offset, 21168)
self.assertEqual(dd.function_starts.data_size, 48)
        text_segment = list(filter(lambda e: e.name == "__TEXT", dd.segments))[0]
        functions_dd = map(text_segment.virtual_address.__add__, dd.function_starts.functions)
self.assertEqual(functions, list(functions_dd))
def test_version_min(self):
sshd = lief.parse(get_sample('MachO/MachO64_x86-64_binary_sshd.bin'))
self.assertEqual(sshd.version_min.version, [10, 11, 0])
self.assertEqual(sshd.version_min.sdk, [10, 11, 0])
def test_va2offset(self):
dd = lief.parse(get_sample('MachO/MachO64_x86-64_binary_dd.bin'))
self.assertEqual(dd.virtual_address_to_offset(0x100004054), 0x4054)
def test_thread_cmd(self):
micromacho = lief.parse(get_sample('MachO/MachO32_x86_binary_micromacho.bin'))
self.assertTrue(micromacho.has_thread_command)
self.assertEqual(micromacho.thread_command.pc, 0x68)
self.assertEqual(micromacho.thread_command.flavor, 1)
self.assertEqual(micromacho.thread_command.count, 16)
self.assertEqual(micromacho.entrypoint, 0x68)
def test_rpath_cmd(self):
rpathmacho = lief.parse(get_sample('MachO/MachO64_x86-64_binary_rpathtest.bin'))
self.assertEqual(rpathmacho.rpath.path, "@executable_path/../lib")
def test_relocations(self):
helloworld = lief.parse(get_sample('MachO/MachO64_x86-64_object_HelloWorld64.o'))
# __text Section
text_section = helloworld.get_section("__text")
relocations = text_section.relocations
self.assertEqual(len(relocations), 2)
# 1
self.assertEqual(relocations[0].address, 0x233)
self.assertEqual(relocations[0].type, 2)
self.assertEqual(relocations[0].size, 32)
self.assertEqual(relocations[0].is_scattered, False)
self.assertEqual(relocations[0].has_symbol, True)
self.assertEqual(relocations[0].symbol.name, "_printf")
self.assertEqual(relocations[0].has_section, True)
self.assertEqual(relocations[0].section.name, text_section.name)
# 0
self.assertEqual(relocations[1].address, 0x21b)
self.assertEqual(relocations[1].type, 1)
self.assertEqual(relocations[1].size, 32)
self.assertEqual(relocations[1].is_scattered, False)
self.assertEqual(relocations[1].has_symbol, False)
self.assertEqual(relocations[1].has_section, True)
self.assertEqual(relocations[1].section.name, text_section.name)
# __compact_unwind__LD Section
cunwind_section = helloworld.get_section("__compact_unwind")
relocations = cunwind_section.relocations
self.assertEqual(len(relocations), 1)
# 0
self.assertEqual(relocations[0].address, 0x247)
self.assertEqual(relocations[0].type, 0)
self.assertEqual(relocations[0].size, 32)
self.assertEqual(relocations[0].is_scattered, False)
self.assertEqual(relocations[0].has_symbol, False)
self.assertEqual(relocations[0].has_section, True)
self.assertEqual(relocations[0].section.name, "__cstring")
def test_data_in_code(self):
binary = lief.parse(get_sample('MachO/MachO32_ARM_binary_data-in-code-LLVM.bin'))
self.assertTrue(binary.has_data_in_code)
dcode = binary.data_in_code
self.assertEqual(dcode.data_offset, 0x11c)
self.assertEqual(dcode.data_size, 0x20)
self.assertEqual(len(dcode.entries), 4)
self.assertEqual(dcode.entries[0].type, lief.MachO.DataCodeEntry.TYPES.DATA)
self.assertEqual(dcode.entries[0].offset, 0)
self.assertEqual(dcode.entries[0].length, 4)
self.assertEqual(dcode.entries[1].type, lief.MachO.DataCodeEntry.TYPES.JUMP_TABLE_32)
self.assertEqual(dcode.entries[1].offset, 4)
self.assertEqual(dcode.entries[1].length, 4)
self.assertEqual(dcode.entries[2].type, lief.MachO.DataCodeEntry.TYPES.JUMP_TABLE_16)
self.assertEqual(dcode.entries[2].offset, 8)
self.assertEqual(dcode.entries[2].length, 2)
self.assertEqual(dcode.entries[3].type, lief.MachO.DataCodeEntry.TYPES.JUMP_TABLE_8)
self.assertEqual(dcode.entries[3].offset, 10)
self.assertEqual(dcode.entries[3].length, 1)
def test_segment_split_info(self):
binary = lief.parse(get_sample('MachO/FAT_MachO_x86_x86-64_library_libdyld.dylib'))
self.assertTrue(binary.has_segment_split_info)
ssi = binary.segment_split_info
self.assertEqual(ssi.data_offset, 32852)
self.assertEqual(ssi.data_size, 292)
def test_dyld_environment(self):
binary = lief.parse(get_sample('MachO/MachO64_x86-64_binary_safaridriver.bin'))
self.assertTrue(binary.has_dyld_environment)
self.assertEqual(binary.dyld_environment.value, "DYLD_VERSIONED_FRAMEWORK_PATH=/System/Library/StagedFrameworks/Safari")
def test_sub_framework(self):
binary = lief.parse(get_sample('MachO/FAT_MachO_x86_x86-64_library_libdyld.dylib'))
self.assertTrue(binary.has_sub_framework)
self.assertEqual(binary.sub_framework.umbrella, "System")
def test_unwind(self):
binary = lief.parse(get_sample('MachO/MachO64_x86-64_binary_sshd.bin'))
functions = sorted(binary.functions, key=lambda f: f.address)
self.assertEqual(len(functions), 2619)
self.assertEqual(functions[0].address, 2624)
self.assertEqual(functions[0].size, 0)
self.assertEqual(functions[0].name, "")
self.assertEqual(functions[-1].address, 0x1000a4f65)
self.assertEqual(functions[-1].size, 0)
self.assertEqual(functions[-1].name, "ctor_0")
def test_build_version(self):
binary = lief.MachO.parse(get_sample('MachO/FAT_MachO_arm-arm64-binary-helloworld.bin'))
target = binary[1]
self.assertTrue(target.has_build_version)
build_version = target.build_version
self.assertEqual(build_version.minos, [12, 1, 0])
self.assertEqual(build_version.sdk, [12, 1, 0])
self.assertEqual(build_version.platform, lief.MachO.BuildVersion.PLATFORMS.IOS)
tools = build_version.tools
self.assertEqual(len(tools), 1)
self.assertEqual(tools[0].version, [409, 12, 0])
self.assertEqual(tools[0].tool, lief.MachO.BuildToolVersion.TOOLS.LD)
def test_segment_index(self):
binary = lief.parse(get_sample('MachO/MachO64_x86-64_binary_safaridriver.bin'))
self.assertEqual(binary.get_segment("__LINKEDIT").index, len(binary.segments) - 1)
original_data_index = binary.get_segment("__DATA").index
        # Add a new segment (it should be placed right before __LINKEDIT)
segment = lief.MachO.SegmentCommand("__LIEF", [0x60] * 0x100)
segment = binary.add(segment)
self.assertEqual(segment.index, binary.get_segment("__LINKEDIT").index - 1)
self.assertEqual(segment.index, original_data_index + 1)
# discard changes
binary = lief.parse(get_sample('MachO/MachO64_x86-64_binary_safaridriver.bin'))
text_segment = binary.get_segment("__TEXT")
original_data_index = binary.get_segment("__DATA").index
binary.remove(text_segment)
self.assertEqual(binary.get_segment("__DATA").index, original_data_index - 1)
self.assertEqual(binary.get_segment("__LINKEDIT").index, original_data_index)
self.assertEqual(binary.get_segment("__PAGEZERO").index, 0)
def test_offset_to_va(self):
# |Name |Virtual Address|Virtual Size|Offset|Size
# +------------+---------------+------------+------+----
# |__PAGEZERO |0x0 |0x100000000 |0x0 |0x0
# |__TEXT |0x100000000 |0x4000 |0x0 |0x4000
# |__DATA_CONST|0x100004000 |0x4000 |0x4000|0x4000
# |__DATA |0x100008000 |0x8000 |0x8000|0x4000
# |__LINKEDIT |0x100010000 |0x4000 |0xc000|0x130
sample = get_sample("MachO/MachO64_x86-64_binary_large-bss.bin")
large_bss = lief.parse(sample)
self.assertEqual(large_bss.segment_from_offset(0).name, "__TEXT")
self.assertEqual(large_bss.segment_from_offset(0x4001).name, "__DATA_CONST")
self.assertEqual(large_bss.segment_from_offset(0xc000).name, "__LINKEDIT")
self.assertEqual(large_bss.segment_from_offset(0xc001).name, "__LINKEDIT")
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
|
|
"""Unittests for heapq."""
from heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest
import random
import unittest
from test import test_support
import sys
def heapiter(heap):
# An iterator returning a heap's elements, smallest-first.
try:
while 1:
yield heappop(heap)
except IndexError:
pass
class TestHeap(unittest.TestCase):
def test_push_pop(self):
# 1) Push 256 random numbers and pop them off, verifying all's OK.
heap = []
data = []
self.check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
heappush(heap, item)
self.check_invariant(heap)
results = []
while heap:
item = heappop(heap)
self.check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
self.assertEqual(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
self.check_invariant(results)
self.assertRaises(TypeError, heappush, [])
try:
self.assertRaises(TypeError, heappush, None, None)
self.assertRaises(TypeError, heappop, None)
except AttributeError:
pass
def check_invariant(self, heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
self.assert_(heap[parentpos] <= item)
def test_heapify(self):
for size in range(30):
heap = [random.random() for dummy in range(size)]
heapify(heap)
self.check_invariant(heap)
self.assertRaises(TypeError, heapify, None)
def test_naive_nbest(self):
data = [random.randrange(2000) for i in range(1000)]
heap = []
for item in data:
heappush(heap, item)
if len(heap) > 10:
heappop(heap)
heap.sort()
self.assertEqual(heap, sorted(data)[-10:])
def test_nbest(self):
# Less-naive "N-best" algorithm, much faster (if len(data) is big
# enough <wink>) than sorting all of data. However, if we had a max
# heap instead of a min heap, it could go faster still via
# heapify'ing all of data (linear time), then doing 10 heappops
# (10 log-time steps).
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
heapify(heap)
for item in data[10:]:
if item > heap[0]: # this gets rarer the longer we run
heapreplace(heap, item)
self.assertEqual(list(heapiter(heap)), sorted(data)[-10:])
self.assertRaises(TypeError, heapreplace, None)
self.assertRaises(TypeError, heapreplace, None, None)
self.assertRaises(IndexError, heapreplace, [], None)
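    # Sketch of the max-heap variant mentioned in test_nbest (illustrative):
    # negate the values so the min heap acts as a max heap, heapify in linear
    # time, then pop the n largest:
    #     neg = [-x for x in data]
    #     heapify(neg)
    #     top10 = [-heappop(neg) for _ in range(10)]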
def test_heapsort(self):
# Exercise everything with repeated heapsort checks
for trial in xrange(100):
size = random.randrange(50)
data = [random.randrange(25) for i in range(size)]
if trial & 1: # Half of the time, use heapify
heap = data[:]
heapify(heap)
else: # The rest of the time, use heappush
heap = []
for item in data:
heappush(heap, item)
heap_sorted = [heappop(heap) for i in range(size)]
self.assertEqual(heap_sorted, sorted(data))
def test_nsmallest(self):
data = [random.randrange(2000) for i in range(1000)]
f = lambda x: x * 547 % 2000
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(nsmallest(n, data), sorted(data)[:n])
self.assertEqual(nsmallest(n, data, key=f),
sorted(data, key=f)[:n])
def test_nlargest(self):
data = [random.randrange(2000) for i in range(1000)]
f = lambda x: x * 547 % 2000
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(nlargest(n, data), sorted(data, reverse=True)[:n])
self.assertEqual(nlargest(n, data, key=f),
sorted(data, key=f, reverse=True)[:n])
#==============================================================================
class LenOnly:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
class GetOnly:
"Dummy sequence class defining __getitem__ but not __len__."
def __getitem__(self, ndx):
return 10
class CmpErr:
"Dummy element that always raises an error during comparison"
def __cmp__(self, other):
raise ZeroDivisionError
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
from itertools import chain, imap
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestErrorHandling(unittest.TestCase):
def test_non_sequence(self):
for f in (heapify, heappop):
self.assertRaises(TypeError, f, 10)
for f in (heappush, heapreplace, nlargest, nsmallest):
self.assertRaises(TypeError, f, 10, 10)
def test_len_only(self):
for f in (heapify, heappop):
self.assertRaises(TypeError, f, LenOnly())
for f in (heappush, heapreplace):
self.assertRaises(TypeError, f, LenOnly(), 10)
for f in (nlargest, nsmallest):
self.assertRaises(TypeError, f, 2, LenOnly())
def test_get_only(self):
for f in (heapify, heappop):
self.assertRaises(TypeError, f, GetOnly())
for f in (heappush, heapreplace):
self.assertRaises(TypeError, f, GetOnly(), 10)
for f in (nlargest, nsmallest):
self.assertRaises(TypeError, f, 2, GetOnly())
def test_get_only(self):
seq = [CmpErr(), CmpErr(), CmpErr()]
for f in (heapify, heappop):
self.assertRaises(ZeroDivisionError, f, seq)
for f in (heappush, heapreplace):
self.assertRaises(ZeroDivisionError, f, seq, 10)
for f in (nlargest, nsmallest):
self.assertRaises(ZeroDivisionError, f, 2, seq)
def test_arg_parsing(self):
for f in (heapify, heappop, heappush, heapreplace, nlargest, nsmallest):
self.assertRaises(TypeError, f, 10)
def test_iterable_args(self):
for f in (nlargest, nsmallest):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, L, R):
self.assertEqual(f(2, g(s)), f(2,s))
self.assertEqual(f(2, S(s)), [])
self.assertRaises(TypeError, f, 2, X(s))
self.assertRaises(TypeError, f, 2, N(s))
self.assertRaises(ZeroDivisionError, f, 2, E(s))
#==============================================================================
def test_main(verbose=None):
from types import BuiltinFunctionType
test_classes = [TestHeap]
if isinstance(heapify, BuiltinFunctionType):
test_classes.append(TestErrorHandling)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Frontends for the message extraction functionality."""
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import shutil
import sys
import tempfile
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.compat import RawConfigParser, StringIO, string_types, u
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.util import odict, LOCALTZ
__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
'init_catalog', 'check_message_extractors', 'update_catalog']
__docformat__ = 'restructuredtext en'
class compile_catalog(Command):
"""Catalog compilation command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import compile_catalog
setup(
...
cmdclass = {'compile_catalog': compile_catalog}
)
:since: version 0.9
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'compile message catalogs to binary MO files'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('directory=', 'd',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('use-fuzzy', 'f',
'also include fuzzy translations'),
('statistics', None,
'print statistics about translations')
]
boolean_options = ['use-fuzzy', 'statistics']
def initialize_options(self):
self.domain = 'messages'
self.directory = None
self.input_file = None
self.output_file = None
self.locale = None
self.use_fuzzy = False
self.statistics = False
def finalize_options(self):
if not self.input_file and not self.directory:
raise DistutilsOptionError('you must specify either the input file '
'or the base directory')
if not self.output_file and not self.directory:
raise DistutilsOptionError('you must specify either the input file '
'or the base directory')
def run(self):
po_files = []
mo_files = []
if not self.input_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(self.directory, locale,
'LC_MESSAGES', self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(self.directory, locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
mo_files.append(self.output_file)
else:
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'rb')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if self.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated +=1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
log.info('%d of %d messages (%d%%) translated in %r',
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not self.use_fuzzy:
log.warn('catalog %r is marked as fuzzy, skipping', po_file)
continue
for message, errors in catalog.check():
for error in errors:
log.error('error: %s:%d: %s', po_file, message.lineno,
error)
log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
finally:
outfile.close()
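# A minimal sketch (hypothetical package layout) of configuring this command
# in a project's setup.cfg once it is registered, so that
# ``python setup.py compile_catalog`` picks up the options without
# command-line flags:
#
#     [compile_catalog]
#     domain = messages
#     directory = mypackage/locale
#     statistics = 1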
class extract_messages(Command):
"""Message extraction command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import extract_messages
setup(
...
cmdclass = {'extract_messages': extract_messages}
)
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'extract localizable strings from the project code'
user_options = [
('charset=', None,
'charset to use in the output file'),
('keywords=', 'k',
'space-separated list of keywords to look for in addition to the '
'defaults'),
('no-default-keywords', None,
'do not include the default keywords'),
('mapping-file=', 'F',
'path to the mapping configuration file'),
('no-location', None,
'do not include location comments with filename and line number'),
('omit-header', None,
'do not include msgid "" entry in header'),
('output-file=', 'o',
'name of the output file'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('sort-output', None,
'generate sorted output (default False)'),
('sort-by-file', None,
'sort output by file location (default False)'),
('msgid-bugs-address=', None,
'set report address for msgid'),
('copyright-holder=', None,
'set copyright holder in output'),
('add-comments=', 'c',
'place comment block with TAG (or those preceding keyword lines) in '
         'output file. Separate multiple TAGs with commas (,)'),
('strip-comments', None,
'strip the comment TAGs from the comments.'),
('input-dirs=', None,
'directories that should be scanned for messages'),
]
boolean_options = [
'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
'sort-output', 'sort-by-file', 'strip-comments'
]
def initialize_options(self):
self.charset = 'utf-8'
self.keywords = ''
self._keywords = DEFAULT_KEYWORDS.copy()
self.no_default_keywords = False
self.mapping_file = None
self.no_location = False
self.omit_header = False
self.output_file = None
self.input_dirs = None
self.width = None
self.no_wrap = False
self.sort_output = False
self.sort_by_file = False
self.msgid_bugs_address = None
self.copyright_holder = None
self.add_comments = None
self._add_comments = []
self.strip_comments = False
def finalize_options(self):
if self.no_default_keywords and not self.keywords:
raise DistutilsOptionError('you must specify new keywords if you '
'disable the default ones')
if self.no_default_keywords:
self._keywords = {}
if self.keywords:
self._keywords.update(parse_keywords(self.keywords.split()))
if not self.output_file:
raise DistutilsOptionError('no output file specified')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.sort_output and self.sort_by_file:
raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
"are mutually exclusive")
if not self.input_dirs:
self.input_dirs = list(dict.fromkeys([k.split('.',1)[0]
for k in self.distribution.packages
]).keys())
if self.add_comments:
self._add_comments = self.add_comments.split(',')
def run(self):
mappings = self._get_mappings()
outfile = open(self.output_file, 'wb')
try:
catalog = Catalog(project=self.distribution.get_name(),
version=self.distribution.get_version(),
msgid_bugs_address=self.msgid_bugs_address,
copyright_holder=self.copyright_holder,
charset=self.charset)
for dirname, (method_map, options_map) in mappings.items():
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
log.info('extracting messages from %s%s', filepath, optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords=self._keywords,
comment_tags=self._add_comments,
callback=callback,
strip_comment_tags=
self.strip_comments)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
log.info('writing PO template file to %s' % self.output_file)
write_po(outfile, catalog, width=self.width,
no_location=self.no_location,
omit_header=self.omit_header,
sort_output=self.sort_output,
sort_by_file=self.sort_by_file)
finally:
outfile.close()
def _get_mappings(self):
mappings = {}
if self.mapping_file:
fileobj = open(self.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
for dirname in self.input_dirs:
mappings[dirname] = method_map, options_map
finally:
fileobj.close()
elif getattr(self.distribution, 'message_extractors', None):
message_extractors = self.distribution.message_extractors
for dirname, mapping in message_extractors.items():
if isinstance(mapping, string_types):
method_map, options_map = parse_mapping(StringIO(mapping))
else:
method_map, options_map = [], {}
for pattern, method, options in mapping:
method_map.append((pattern, method))
options_map[pattern] = options or {}
mappings[dirname] = method_map, options_map
else:
for dirname in self.input_dirs:
mappings[dirname] = DEFAULT_MAPPING, {}
return mappings
def check_message_extractors(dist, name, value):
"""Validate the ``message_extractors`` keyword argument to ``setup()``.
:param dist: the distutils/setuptools ``Distribution`` object
:param name: the name of the keyword argument (should always be
"message_extractors")
:param value: the value of the keyword argument
:raise `DistutilsSetupError`: if the value is not valid
:see: `Adding setup() arguments
<http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
"""
assert name == 'message_extractors'
if not isinstance(value, dict):
raise DistutilsSetupError('the value of the "message_extractors" '
'parameter must be a dictionary')
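# A minimal sketch (hypothetical package name) of the ``message_extractors``
# mapping validated above, as it would be passed to setup(); each value is a
# list of (pattern, method, options) tuples, as consumed by
# extract_messages._get_mappings():
#
#     setup(
#         ...
#         message_extractors = {
#             'mypackage': [
#                 ('**.py',                'python', None),
#                 ('**/templates/**.html', 'genshi', None),
#             ],
#         },
#     )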
class init_catalog(Command):
"""New catalog initialization command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import init_catalog
setup(
...
cmdclass = {'init_catalog': init_catalog}
)
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'create a new catalog based on a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to output directory'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale for the new localized catalog'),
]
def initialize_options(self):
self.output_dir = None
self.output_file = None
self.input_file = None
self.locale = None
self.domain = 'messages'
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.locale:
raise DistutilsOptionError('you must provide a locale for the '
'new catalog')
try:
self._locale = Locale.parse(self.locale)
except UnknownLocaleError:
raise DistutilsOptionError(sys.exc_info()[1])
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output directory')
if not self.output_file:
self.output_file = os.path.join(self.output_dir, self.locale,
'LC_MESSAGES', self.domain + '.po')
if not os.path.exists(os.path.dirname(self.output_file)):
os.makedirs(os.path.dirname(self.output_file))
def run(self):
log.info('creating catalog %r based on %r', self.output_file,
self.input_file)
infile = open(self.input_file, 'r')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=self.locale)
finally:
infile.close()
catalog.locale = self._locale
catalog.fuzzy = False
outfile = open(self.output_file, 'wb')
try:
write_po(outfile, catalog)
finally:
outfile.close()
class update_catalog(Command):
"""Catalog merging command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import update_catalog
setup(
...
cmdclass = {'update_catalog': update_catalog}
)
:since: version 0.9
:see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
:see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
"""
description = 'update message catalogs from a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to base directory containing the catalogs'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('ignore-obsolete=', None,
'whether to omit obsolete messages from the output'),
('no-fuzzy-matching', 'N',
'do not use fuzzy matching'),
('previous', None,
'keep previous msgids of translated messages')
]
boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous']
def initialize_options(self):
self.domain = 'messages'
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
self.ignore_obsolete = False
self.no_fuzzy_matching = False
self.previous = False
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output file or '
'directory')
if self.output_file and not self.locale:
raise DistutilsOptionError('you must specify the locale')
if self.no_fuzzy_matching and self.previous:
self.previous = False
def run(self):
po_files = []
if not self.output_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.output_dir, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
else:
for locale in os.listdir(self.output_dir):
po_file = os.path.join(self.output_dir, locale,
'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((self.locale, self.output_file))
domain = self.domain
if not domain:
domain = os.path.splitext(os.path.basename(self.input_file))[0]
infile = open(self.input_file, 'U')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for locale, filename in po_files:
log.info('updating catalog %r based on %r', filename,
self.input_file)
infile = open(filename, 'U', encoding="utf-8")
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, self.no_fuzzy_matching)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'wb')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=self.ignore_obsolete,
include_previous=self.previous)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
class CommandLineInterface(object):
"""Command-line interface.
This class provides a simple command-line interface to the message
extraction and PO file generation functionality.
"""
usage = '%%prog %s [options] %s'
version = '%%prog %s' % VERSION
commands = {
'compile': 'compile message catalogs to MO files',
'extract': 'extract messages from source files and generate a POT file',
'init': 'create new message catalogs from a POT file',
'update': 'update existing message catalogs from a POT file'
}
def run(self, argv=sys.argv):
"""Main entry point of the command-line interface.
:param argv: list of arguments passed on the command-line
"""
self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
version=self.version)
self.parser.disable_interspersed_args()
self.parser.print_help = self._help
self.parser.add_option('--list-locales', dest='list_locales',
action='store_true',
help="print all known locales and exit")
self.parser.add_option('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
help='print as much as possible')
self.parser.add_option('-q', '--quiet', action='store_const',
dest='loglevel', const=logging.ERROR,
help='print as little as possible')
self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
options, args = self.parser.parse_args(argv[1:])
self._configure_logging(options.loglevel)
if options.list_locales:
identifiers = sorted(localedata.locale_identifiers())
longest = max([len(identifier) for identifier in identifiers])
format = u('%%-%ds %%s') % (longest + 1)
for identifier in identifiers:
locale = Locale.parse(identifier)
output = format % (identifier, locale.english_name)
print(output.encode(sys.stdout.encoding or
getpreferredencoding() or
'ascii', 'replace'))
return 0
if not args:
self.parser.error('no valid command or option passed. '
'Try the -h/--help option for more information.')
cmdname = args[0]
if cmdname not in self.commands:
self.parser.error('unknown command "%s"' % cmdname)
return getattr(self, cmdname)(args[1:])
def _configure_logging(self, loglevel):
self.log = logging.getLogger('babel')
self.log.setLevel(loglevel)
        # Don't add a new handler for every instance initialization (#227);
        # this would cause duplicated output when the CommandLineInterface is
        # used as a normal Python class.
if self.log.handlers:
handler = self.log.handlers[0]
else:
handler = logging.StreamHandler()
self.log.addHandler(handler)
handler.setLevel(loglevel)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
def _help(self):
print(self.parser.format_help())
print("commands:")
longest = max([len(command) for command in self.commands])
format = " %%-%ds %%s" % max(8, longest + 1)
commands = sorted(self.commands.items())
for name, description in commands:
print(format % (name, description))
def compile(self, argv):
"""Subcommand for compiling a message catalog to a MO file.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('compile', ''),
description=self.commands['compile'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of MO and PO files (default '%default')")
parser.add_option('--directory', '-d', dest='directory',
metavar='DIR', help='base directory of catalog files')
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the catalog')
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.mo')")
parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
action='store_true',
help='also include fuzzy translations (default '
'%default)')
parser.add_option('--statistics', dest='statistics',
action='store_true',
help='print statistics about translations')
parser.set_defaults(domain='messages', use_fuzzy=False,
compile_all=False, statistics=False)
options, args = parser.parse_args(argv)
po_files = []
mo_files = []
if not options.input_file:
if not options.directory:
parser.error('you must specify either the input file or the '
'base directory')
if options.locale:
po_files.append((options.locale,
os.path.join(options.directory,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
for locale in os.listdir(options.directory):
po_file = os.path.join(options.directory, locale,
'LC_MESSAGES', options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(options.directory, locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
po_files.append((options.locale, options.input_file))
if options.output_file:
mo_files.append(options.output_file)
else:
if not options.directory:
parser.error('you must specify either the input file or '
'the base directory')
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
if not po_files:
parser.error('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'r')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if options.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated +=1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
self.log.info("%d of %d messages (%d%%) translated in %r",
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not options.use_fuzzy:
self.log.warn('catalog %r is marked as fuzzy, skipping',
po_file)
continue
for message, errors in catalog.check():
for error in errors:
self.log.error('error: %s:%d: %s', po_file, message.lineno,
error)
self.log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
finally:
outfile.close()
def extract(self, argv):
"""Subcommand for extracting messages from source files and generating
a POT file.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
description=self.commands['extract'])
parser.add_option('--charset', dest='charset',
help='charset to use in the output (default '
'"%default")')
parser.add_option('-k', '--keyword', dest='keywords', action='append',
help='keywords to look for in addition to the '
'defaults. You can specify multiple -k flags on '
'the command line.')
parser.add_option('--no-default-keywords', dest='no_default_keywords',
action='store_true',
help="do not include the default keywords")
parser.add_option('--mapping', '-F', dest='mapping_file',
help='path to the extraction mapping file')
parser.add_option('--no-location', dest='no_location',
action='store_true',
help='do not include location comments with filename '
'and line number')
parser.add_option('--omit-header', dest='omit_header',
action='store_true',
help='do not include msgid "" entry in header')
parser.add_option('-o', '--output', dest='output',
help='path to the output POT file')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.add_option('--sort-output', dest='sort_output',
action='store_true',
help='generate sorted output (default False)')
parser.add_option('--sort-by-file', dest='sort_by_file',
action='store_true',
help='sort output by file location (default False)')
parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
metavar='EMAIL@ADDRESS',
help='set report address for msgid')
parser.add_option('--copyright-holder', dest='copyright_holder',
help='set copyright holder in output')
parser.add_option('--project', dest='project',
help='set project name in output')
parser.add_option('--version', dest='version',
help='set project version in output')
parser.add_option('--add-comments', '-c', dest='comment_tags',
metavar='TAG', action='append',
help='place comment block with TAG (or those '
'preceding keyword lines) in output file. One '
'TAG per argument call')
parser.add_option('--strip-comment-tags', '-s',
dest='strip_comment_tags', action='store_true',
help='Strip the comment tags from the comments.')
parser.set_defaults(charset='utf-8', keywords=[],
no_default_keywords=False, no_location=False,
omit_header = False, width=None, no_wrap=False,
sort_output=False, sort_by_file=False,
comment_tags=[], strip_comment_tags=False)
options, args = parser.parse_args(argv)
if not args:
parser.error('incorrect number of arguments')
if options.output not in (None, '-'):
outfile = open(options.output, 'wb')
else:
outfile = sys.stdout
keywords = DEFAULT_KEYWORDS.copy()
if options.no_default_keywords:
if not options.keywords:
parser.error('you must specify new keywords if you disable the '
'default ones')
keywords = {}
if options.keywords:
keywords.update(parse_keywords(options.keywords))
if options.mapping_file:
fileobj = open(options.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
finally:
fileobj.close()
else:
method_map = DEFAULT_MAPPING
options_map = {}
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
if options.sort_output and options.sort_by_file:
parser.error("'--sort-output' and '--sort-by-file' are mutually "
"exclusive")
try:
catalog = Catalog(project=options.project,
version=options.version,
msgid_bugs_address=options.msgid_bugs_address,
copyright_holder=options.copyright_holder,
charset=options.charset)
for dirname in args:
if not os.path.isdir(dirname):
parser.error('%r is not a directory' % dirname)
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
self.log.info('extracting messages from %s%s', filepath,
optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords, options.comment_tags,
callback=callback,
strip_comment_tags=
options.strip_comment_tags)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
if options.output not in (None, '-'):
self.log.info('writing PO template file to %s' % options.output)
write_po(outfile, catalog, width=options.width,
no_location=options.no_location,
omit_header=options.omit_header,
sort_output=options.sort_output,
sort_by_file=options.sort_by_file)
finally:
if options.output:
outfile.close()
def init(self, argv):
"""Subcommand for creating new message catalogs from a template.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('init', ''),
description=self.commands['init'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale for the new localized catalog')
parser.set_defaults(domain='messages')
options, args = parser.parse_args(argv)
if not options.locale:
parser.error('you must provide a locale for the new catalog')
try:
locale = Locale.parse(options.locale)
except UnknownLocaleError:
parser.error(sys.exc_info()[1])
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if not options.output_file:
options.output_file = os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')
if not os.path.exists(os.path.dirname(options.output_file)):
os.makedirs(os.path.dirname(options.output_file))
infile = open(options.input_file, 'r')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=options.locale)
finally:
infile.close()
catalog.locale = locale
catalog.revision_date = datetime.now(LOCALTZ)
self.log.info('creating catalog %r based on %r', options.output_file,
options.input_file)
outfile = open(options.output_file, 'wb')
try:
write_po(outfile, catalog)
finally:
outfile.close()
def update(self, argv):
"""Subcommand for updating existing message catalogs from a template.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('update', ''),
description=self.commands['update'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the translations catalog')
parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
action='store_true',
help='do not include obsolete messages in the output '
'(default %default)')
parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
action='store_true',
help='do not use fuzzy matching (default %default)')
parser.add_option('--previous', dest='previous', action='store_true',
help='keep previous msgids of translated messages '
'(default %default)')
parser.set_defaults(domain='messages', ignore_obsolete=False,
no_fuzzy_matching=False, previous=False)
options, args = parser.parse_args(argv)
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if options.output_file and not options.locale:
parser.error('you must specify the locale')
if options.no_fuzzy_matching and options.previous:
options.previous = False
po_files = []
if not options.output_file:
if options.locale:
po_files.append((options.locale,
os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
else:
for locale in os.listdir(options.output_dir):
po_file = os.path.join(options.output_dir, locale,
'LC_MESSAGES',
options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((options.locale, options.output_file))
domain = options.domain
if not domain:
domain = os.path.splitext(os.path.basename(options.input_file))[0]
infile = open(options.input_file, 'U')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
parser.error('no message catalogs found')
for locale, filename in po_files:
self.log.info('updating catalog %r based on %r', filename,
options.input_file)
infile = open(filename, 'U')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, options.no_fuzzy_matching)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'w')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=options.ignore_obsolete,
include_previous=options.previous)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
def main():
return CommandLineInterface().run(sys.argv)
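# A minimal sketch (hypothetical paths) of driving the CLI programmatically;
# argv[0] only serves as the program name, and the subcommand plus its
# options follow, exactly as on the ``pybabel`` command line:
#
#     CommandLineInterface().run(
#         ['pybabel', 'extract', '-o', 'messages.pot', 'mypackage'])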
def parse_mapping(fileobj, filename=None):
"""Parse an extraction method mapping from a file-like object.
>>> buf = StringIO('''
... [extractors]
... custom = mypackage.module:myfunc
...
... # Python source files
... [python: **.py]
...
... # Genshi templates
... [genshi: **/templates/**.html]
... include_attrs =
... [genshi: **/templates/**.txt]
... template_class = genshi.template:TextTemplate
... encoding = latin-1
...
... # Some custom extractor
... [custom: **/custom/*.*]
... ''')
>>> method_map, options_map = parse_mapping(buf)
>>> len(method_map)
4
>>> method_map[0]
('**.py', 'python')
>>> options_map['**.py']
{}
>>> method_map[1]
('**/templates/**.html', 'genshi')
>>> options_map['**/templates/**.html']['include_attrs']
''
>>> method_map[2]
('**/templates/**.txt', 'genshi')
>>> options_map['**/templates/**.txt']['template_class']
'genshi.template:TextTemplate'
>>> options_map['**/templates/**.txt']['encoding']
'latin-1'
>>> method_map[3]
('**/custom/*.*', 'mypackage.module:myfunc')
>>> options_map['**/custom/*.*']
{}
:param fileobj: a readable file-like object containing the configuration
text to parse
:return: a `(method_map, options_map)` tuple
:rtype: `tuple`
:see: `extract_from_directory`
"""
extractors = {}
method_map = []
options_map = {}
parser = RawConfigParser()
parser._sections = odict(parser._sections) # We need ordered sections
parser.readfp(fileobj, filename)
for section in parser.sections():
if section == 'extractors':
extractors = dict(parser.items(section))
else:
method, pattern = [part.strip() for part in section.split(':', 1)]
method_map.append((pattern, method))
options_map[pattern] = dict(parser.items(section))
if extractors:
for idx, (pattern, method) in enumerate(method_map):
if method in extractors:
method = extractors[method]
method_map[idx] = (pattern, method)
return (method_map, options_map)
def parse_keywords(strings=[]):
"""Parse keywords specifications from the given list of strings.
>>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
>>> for keyword, indices in sorted(kw):
... print((keyword, indices))
('_', None)
('dgettext', (2,))
('dngettext', (2, 3))
('pgettext', ((1, 'c'), 2))
"""
keywords = {}
for string in strings:
if ':' in string:
funcname, indices = string.split(':')
else:
funcname, indices = string, None
if funcname not in keywords:
if indices:
inds = []
for x in indices.split(','):
if x[-1] == 'c':
inds.append((int(x[:-1]), 'c'))
else:
inds.append(int(x))
indices = tuple(inds)
keywords[funcname] = indices
return keywords
if __name__ == '__main__':
main()
|
|
try:
import Queue as queue
except ImportError:
import queue
from helpers import Struct
class uiParameter(Struct):
"""uiParameter represents a single GUI element that is used to build a parameter window
in the UI (simulator event "make_param_window").
It has one parameter, ``type``, that defines the type of the parameter. Possible parameter
types are GROUP, INT, FLOAT, BOOL and SELECT.
"""
GROUP, INT, FLOAT, BOOL, SELECT = 0,1,2,3,4
def __init__(self, elem_type):
self.type = elem_type
class uiGroup(uiParameter):
def __init__(self, contents):
        uiParameter.__init__(self, uiParameter.GROUP)
self.contents = contents
class uiInt(uiParameter):
def __init__(self, value, min_value = -100, max_value = 100):
uiParameter.__init__(self, uiParameter.INT)
self.value = value
self.min_value = min_value
self.max_value = max_value
class uiFloat(uiParameter):
def __init__(self, value, step = 1.0, min_value = -1000.0, max_value = 1000.0):
uiParameter.__init__(self, uiParameter.FLOAT)
self.value = value
self.step = step
self.min_value = min_value
self.max_value = max_value
class uiBool(uiParameter):
def __init__(self, value):
uiParameter.__init__(self, uiParameter.BOOL)
self.value = value
class uiSelect(uiParameter):
def __init__(self, value, value_list):
        uiParameter.__init__(self, uiParameter.SELECT)
self.value = value
self.value_list = value_list
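# A minimal sketch (hypothetical parameter names and layout) of how these
# elements might be combined to describe a parameter window; the exact
# structure is defined by the supervisor's get_ui_description():
#
#     parameters = [
#         ('velocity', uiFloat(0.2, step=0.1, min_value=0.0, max_value=1.0)),
#         ('trace',    uiBool(True)),
#         ('behavior', uiSelect('avoid', ['avoid', 'follow'])),
#     ]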
class SimUI:
"""The SimUI class defines a front-end for the :class:`~simulator.Simulator`.
It contains the necessary functions for the frontend-simulator communication
and stubs for the message callbacks.
This class manages three important objects:
* The simulator, as ``self.simulator_thread``
* The incoming simulator events, as ``self.in_queue``
* The outgoing simulator commands, as ``self.sim_queue``
The constructor of SimUI takes a :class:`~renderer.Renderer` object as parameter.
This renderer will be passed to the simulator to draw on.
"""
def __init__(self, renderer, simulator_class):
self.event_handler = None
self.sim_queue = queue.Queue()
# create the simulator thread
self.simulator_thread = simulator_class(renderer, self.sim_queue)
self.in_queue = self.simulator_thread._out_queue
self.simulator_thread.start()
def register_event_handler(self, event_handler):
"""Register a callback that will be executed to process the
"""
self.event_handler = event_handler
def unregister_event_handler(self):
"""Unregister a previously registered event handler.
"""
self.event_handler = None
def process_events(self, process_all = False):
"""Processes one or all incoming events from the simulator. A single
event is a tuple (name,args). During the processing of the event,
the function ``simulator_``\ *name* will be called with args as parameters.
        It is strongly discouraged to create new class methods with names
        starting with ``simulator_``, as such methods could be called by
        the simulator unintentionally.
Unknown or malformed events will lead to an error message printed
to the console.
"""
while not self.in_queue.empty():
tpl = self.in_queue.get()
if isinstance(tpl,tuple) and len(tpl) == 2:
name, args = tpl
intercepted = False
if self.event_handler is not None:
intercepted = self.event_handler(name,args)
if not intercepted:
# Scramble
name = "simulator_{}".format(name)
if name in self.__class__.__dict__:
try:
self.__class__.__dict__[name](self,*args)
except TypeError:
print("Wrong UI event parameters {}{}".format(name,args))
raise
else:
print("Unknown UI event '{}'".format(name))
else:
print("Wrong UI event format '{}'".format(tpl))
self.in_queue.task_done()
if not process_all:
return
def run_simulator_command(self,command,*args):
"""Sends the command *command* to the simulator. All arguments after
*command* are passed to the command processing function on the simulator side.
See :class:`~simulator.Simulator` for the available commands.
"""
self.sim_queue.put((command, args))
# Simulator processing functions : stubs
def simulator_make_param_window(self,robot_id,name,parameters):
"""A request from the supervisor to create a parameter window.
*robot_id* is guaranteed to uniquely identify a robot in a simulation.
Currently, *robot_id* is the actual robot object.
It can be used e.g. to extract the color of the robot as ``robot_id.get_color()``.
*name* is the desired window name, and *parameters* is the structure
returned by :meth:`~supervisor.Supervisor.get_ui_description`.
"""
raise NotImplementedError('SimUI.simulator_make_param_window')
def simulator_running(self):
"""A notification that the simulation has been started."""
raise NotImplementedError('SimUI.simulator_running')
def simulator_paused(self):
"""A notification that the simulation has been paused."""
raise NotImplementedError('SimUI.simulator_paused')
def simulator_reset(self):
"""A notification that the simulation has been reset."""
raise NotImplementedError('SimUI.simulator_reset')
def simulator_stopped(self):
"""A notification that the simulation has been stopped."""
raise NotImplementedError('SimUI.simulator_stopped')
def simulator_update_view(self):
"""A request to redraw the simulation window. This notification
signifies that the simulation has stopped using the renderer,
and is waiting for the UI to process this event.
The simulation will be resumed after this function exits.
"""
raise NotImplementedError('SimUI.simulator_update_view')
def simulator_exception(self,e_type, e_value, e_traceback):
"""An exception was raised in the simulator thread in the attempt
to process an incoming command.
"""
raise NotImplementedError('SimUI.simulator_exception')
def simulator_log(self, message, objclass, objcolor):
"""A log *message* was generated by one of the simulation objects
        of class *objclass*. The *objcolor* is the color of the simobject
        that the message is associated with, or None if there is no such
        object.
"""
raise NotImplementedError('SimUI.simulator_log')
# Commands for the tester:
def run_simulation(self):
"""Unpause the simulation."""
self.run_simulator_command('start_simulation')
def pause_simulation(self):
"""Pause the simulation."""
self.run_simulator_command('pause_simulation')
def step_simulation(self):
"""Advance the simulation one step if it is paused."""
self.run_simulator_command('step_simulation')
def start_testing(self):
"""Prepare the simulation environment for testing, e.g. disable
user controls of the simulation progress."""
pass
def stop_testing(self):
"""Return UI back to normal operation."""
pass
#def get_view_parameters(self):
#pass
#def set_view_parameters(self,params):
#pass
#def new_renderer(self):
#pass
#def pop_renderer(self):
#pass
#def start_test(self):
#"""This function will pause and 'cache' the currently running
#simulation. A new `simulator.Simulator` will be started with
#the control belonging to the tester object.
#"""
#self.antiteststruct = Struct()
#self.antiteststruct.wasrunning = False
## 1) Pause simulator
#if self.simulator_thread.is_running():
#self.antiteststruct.wasrunning = True # Remember the setting
#self.run_simulator_command('pause_simulation') # Pause simulation
#self.process_events(True) # Process all events
## 2) Create new simulator
#self.antiteststruct.simulator = simulator_thread
#self.simulator_thread = sim.Simulator(self.instantiate_new_renderer(), self.sim_queue)
#self.simulator_thread.start()
#def stop_test(self):
#"""This function will restore the cached simulation and
#simulation. A new `simulator.Simulator` will be started with
#the control belonging to the tester object.
#"""
#view_params = self.get_view_parameters()
## 1) Stop simulator
#self.run_simulator_command('stop')
#while self.simulator_thread.isAlive():
#self.process_events(True)
#self.simulator_thread.join(0.1)
## 2) Switch to old simulator
#self.pop_renderer()
#self.simulator_thread = self.antiteststruct.simulator
## 3) continue running
#if self.antiteststruct.wasrunning:
#self.run_simulator_command('pause_simulation')
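# A minimal sketch (hypothetical names) of a concrete front-end built on
# SimUI: it overrides a few of the ``simulator_*`` stubs, sends commands
# through the convenience wrappers, and pumps the incoming event queue with
# process_events():
#
#     class ConsoleUI(SimUI):
#         def simulator_running(self):
#             print("simulation running")
#         def simulator_update_view(self):
#             pass  # nothing to redraw in a console front-end
#         def simulator_log(self, message, objclass, objcolor):
#             print("[{}] {}".format(objclass, message))
#
#     ui = ConsoleUI(renderer, Simulator)  # renderer/Simulator are assumed
#     ui.run_simulation()
#     ui.process_events(process_all=True)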
|
|
# encoding: utf-8
"""Initializes lxml parser, particularly the custom element classes.
Also makes available a handful of functions that wrap its typical uses.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from lxml import etree
from .ns import NamespacePrefixedTag
# configure etree XML parser -------------------------------
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True, resolve_entities=False)
oxml_parser.set_element_class_lookup(element_class_lookup)
def parse_from_template(template_name):
"""
Return an element loaded from the XML in the template file identified by
*template_name*.
"""
thisdir = os.path.split(__file__)[0]
filename = os.path.join(thisdir, "..", "templates", "%s.xml" % template_name)
with open(filename, "rb") as f:
xml = f.read()
return parse_xml(xml)
def parse_xml(xml):
"""
Return root lxml element obtained by parsing XML character string in
*xml*, which can be either a Python 2.x string or unicode.
"""
root_element = etree.fromstring(xml, oxml_parser)
return root_element
def register_element_cls(nsptagname, cls):
"""
Register *cls* to be constructed when the oxml parser encounters an
    element having name *nsptagname*. *nsptagname* is a string of the form
``nspfx:tagroot``, e.g. ``'w:document'``.
"""
nsptag = NamespacePrefixedTag(nsptagname)
namespace = element_class_lookup.get_namespace(nsptag.nsuri)
namespace[nsptag.local_part] = cls
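# For example, once register_element_cls("a:hlinkClick", CT_Hyperlink) below
# has run, any <a:hlinkClick> element returned by parse_xml() or
# parse_from_template() is constructed as a CT_Hyperlink instance rather than
# a plain lxml element.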
from .action import CT_Hyperlink # noqa: E402
register_element_cls("a:hlinkClick", CT_Hyperlink)
register_element_cls("a:hlinkHover", CT_Hyperlink)
from .chart.axis import ( # noqa: E402
CT_AxisUnit,
CT_CatAx,
CT_ChartLines,
CT_Crosses,
CT_DateAx,
CT_LblOffset,
CT_Orientation,
CT_Scaling,
CT_TickLblPos,
CT_TickMark,
CT_ValAx,
)
register_element_cls("c:catAx", CT_CatAx)
register_element_cls("c:crosses", CT_Crosses)
register_element_cls("c:dateAx", CT_DateAx)
register_element_cls("c:lblOffset", CT_LblOffset)
register_element_cls("c:majorGridlines", CT_ChartLines)
register_element_cls("c:majorTickMark", CT_TickMark)
register_element_cls("c:majorUnit", CT_AxisUnit)
register_element_cls("c:minorTickMark", CT_TickMark)
register_element_cls("c:minorUnit", CT_AxisUnit)
register_element_cls("c:orientation", CT_Orientation)
register_element_cls("c:scaling", CT_Scaling)
register_element_cls("c:tickLblPos", CT_TickLblPos)
register_element_cls("c:valAx", CT_ValAx)
from .chart.chart import ( # noqa: E402
CT_Chart,
CT_ChartSpace,
CT_ExternalData,
CT_PlotArea,
CT_Style,
)
register_element_cls("c:chart", CT_Chart)
register_element_cls("c:chartSpace", CT_ChartSpace)
register_element_cls("c:externalData", CT_ExternalData)
register_element_cls("c:plotArea", CT_PlotArea)
register_element_cls("c:style", CT_Style)
from .chart.datalabel import CT_DLbl, CT_DLblPos, CT_DLbls # noqa: E402
register_element_cls("c:dLbl", CT_DLbl)
register_element_cls("c:dLblPos", CT_DLblPos)
register_element_cls("c:dLbls", CT_DLbls)
from .chart.legend import CT_Legend, CT_LegendPos # noqa: E402
register_element_cls("c:legend", CT_Legend)
register_element_cls("c:legendPos", CT_LegendPos)
from .chart.marker import CT_Marker, CT_MarkerSize, CT_MarkerStyle # noqa: E402
register_element_cls("c:marker", CT_Marker)
register_element_cls("c:size", CT_MarkerSize)
register_element_cls("c:symbol", CT_MarkerStyle)
from .chart.plot import ( # noqa: E402
CT_Area3DChart,
CT_AreaChart,
CT_BarChart,
CT_BarDir,
CT_BubbleChart,
CT_BubbleScale,
CT_DoughnutChart,
CT_GapAmount,
CT_Grouping,
CT_LineChart,
CT_Overlap,
CT_PieChart,
CT_RadarChart,
CT_ScatterChart,
)
register_element_cls("c:area3DChart", CT_Area3DChart)
register_element_cls("c:areaChart", CT_AreaChart)
register_element_cls("c:barChart", CT_BarChart)
register_element_cls("c:barDir", CT_BarDir)
register_element_cls("c:bubbleChart", CT_BubbleChart)
register_element_cls("c:bubbleScale", CT_BubbleScale)
register_element_cls("c:doughnutChart", CT_DoughnutChart)
register_element_cls("c:gapWidth", CT_GapAmount)
register_element_cls("c:grouping", CT_Grouping)
register_element_cls("c:lineChart", CT_LineChart)
register_element_cls("c:overlap", CT_Overlap)
register_element_cls("c:pieChart", CT_PieChart)
register_element_cls("c:radarChart", CT_RadarChart)
register_element_cls("c:scatterChart", CT_ScatterChart)
from .chart.series import ( # noqa: E402
CT_AxDataSource,
CT_DPt,
CT_Lvl,
CT_NumDataSource,
CT_SeriesComposite,
CT_StrVal_NumVal_Composite,
)
register_element_cls("c:bubbleSize", CT_NumDataSource)
register_element_cls("c:cat", CT_AxDataSource)
register_element_cls("c:dPt", CT_DPt)
register_element_cls("c:lvl", CT_Lvl)
register_element_cls("c:pt", CT_StrVal_NumVal_Composite)
register_element_cls("c:ser", CT_SeriesComposite)
register_element_cls("c:val", CT_NumDataSource)
register_element_cls("c:xVal", CT_NumDataSource)
register_element_cls("c:yVal", CT_NumDataSource)
from .chart.shared import ( # noqa: E402
CT_Boolean,
CT_Boolean_Explicit,
CT_Double,
CT_Layout,
CT_LayoutMode,
CT_ManualLayout,
CT_NumFmt,
CT_Title,
CT_Tx,
CT_UnsignedInt,
)
register_element_cls("c:autoTitleDeleted", CT_Boolean_Explicit)
register_element_cls("c:autoUpdate", CT_Boolean)
register_element_cls("c:bubble3D", CT_Boolean)
register_element_cls("c:crossAx", CT_UnsignedInt)
register_element_cls("c:crossesAt", CT_Double)
register_element_cls("c:date1904", CT_Boolean)
register_element_cls("c:delete", CT_Boolean)
register_element_cls("c:idx", CT_UnsignedInt)
register_element_cls("c:invertIfNegative", CT_Boolean_Explicit)
register_element_cls("c:layout", CT_Layout)
register_element_cls("c:manualLayout", CT_ManualLayout)
register_element_cls("c:max", CT_Double)
register_element_cls("c:min", CT_Double)
register_element_cls("c:numFmt", CT_NumFmt)
register_element_cls("c:order", CT_UnsignedInt)
register_element_cls("c:overlay", CT_Boolean_Explicit)
register_element_cls("c:ptCount", CT_UnsignedInt)
register_element_cls("c:showCatName", CT_Boolean_Explicit)
register_element_cls("c:showLegendKey", CT_Boolean_Explicit)
register_element_cls("c:showPercent", CT_Boolean_Explicit)
register_element_cls("c:showSerName", CT_Boolean_Explicit)
register_element_cls("c:showVal", CT_Boolean_Explicit)
register_element_cls("c:smooth", CT_Boolean)
register_element_cls("c:title", CT_Title)
register_element_cls("c:tx", CT_Tx)
register_element_cls("c:varyColors", CT_Boolean)
register_element_cls("c:x", CT_Double)
register_element_cls("c:xMode", CT_LayoutMode)
from .coreprops import CT_CoreProperties # noqa: E402
register_element_cls("cp:coreProperties", CT_CoreProperties)
from .dml.color import ( # noqa: E402
CT_Color,
CT_HslColor,
CT_Percentage,
CT_PresetColor,
CT_SchemeColor,
CT_ScRgbColor,
CT_SRgbColor,
CT_SystemColor,
)
register_element_cls("a:bgClr", CT_Color)
register_element_cls("a:fgClr", CT_Color)
register_element_cls("a:hslClr", CT_HslColor)
register_element_cls("a:lumMod", CT_Percentage)
register_element_cls("a:lumOff", CT_Percentage)
register_element_cls("a:prstClr", CT_PresetColor)
register_element_cls("a:schemeClr", CT_SchemeColor)
register_element_cls("a:scrgbClr", CT_ScRgbColor)
register_element_cls("a:srgbClr", CT_SRgbColor)
register_element_cls("a:sysClr", CT_SystemColor)
from .dml.fill import ( # noqa: E402
CT_Blip,
CT_BlipFillProperties,
CT_GradientFillProperties,
CT_GradientStop,
CT_GradientStopList,
CT_GroupFillProperties,
CT_LinearShadeProperties,
CT_NoFillProperties,
CT_PatternFillProperties,
CT_RelativeRect,
CT_SolidColorFillProperties,
)
register_element_cls("a:blip", CT_Blip)
register_element_cls("a:blipFill", CT_BlipFillProperties)
register_element_cls("a:gradFill", CT_GradientFillProperties)
register_element_cls("a:grpFill", CT_GroupFillProperties)
register_element_cls("a:gs", CT_GradientStop)
register_element_cls("a:gsLst", CT_GradientStopList)
register_element_cls("a:lin", CT_LinearShadeProperties)
register_element_cls("a:noFill", CT_NoFillProperties)
register_element_cls("a:pattFill", CT_PatternFillProperties)
register_element_cls("a:solidFill", CT_SolidColorFillProperties)
register_element_cls("a:srcRect", CT_RelativeRect)
from .dml.line import CT_PresetLineDashProperties # noqa: E402
register_element_cls("a:prstDash", CT_PresetLineDashProperties)
from .presentation import ( # noqa: E402
CT_Presentation,
CT_SlideId,
CT_SlideIdList,
CT_SlideMasterIdList,
CT_SlideMasterIdListEntry,
CT_SlideSize,
)
register_element_cls("p:presentation", CT_Presentation)
register_element_cls("p:sldId", CT_SlideId)
register_element_cls("p:sldIdLst", CT_SlideIdList)
register_element_cls("p:sldMasterId", CT_SlideMasterIdListEntry)
register_element_cls("p:sldMasterIdLst", CT_SlideMasterIdList)
register_element_cls("p:sldSz", CT_SlideSize)
from .shapes.autoshape import ( # noqa: E402
CT_AdjPoint2D,
CT_CustomGeometry2D,
CT_GeomGuide,
CT_GeomGuideList,
CT_NonVisualDrawingShapeProps,
CT_Path2D,
CT_Path2DClose,
CT_Path2DLineTo,
CT_Path2DList,
CT_Path2DMoveTo,
CT_PresetGeometry2D,
CT_Shape,
CT_ShapeNonVisual,
)
register_element_cls("a:avLst", CT_GeomGuideList)
register_element_cls("a:custGeom", CT_CustomGeometry2D)
register_element_cls("a:gd", CT_GeomGuide)
register_element_cls("a:close", CT_Path2DClose)
register_element_cls("a:lnTo", CT_Path2DLineTo)
register_element_cls("a:moveTo", CT_Path2DMoveTo)
register_element_cls("a:path", CT_Path2D)
register_element_cls("a:pathLst", CT_Path2DList)
register_element_cls("a:prstGeom", CT_PresetGeometry2D)
register_element_cls("a:pt", CT_AdjPoint2D)
register_element_cls("p:cNvSpPr", CT_NonVisualDrawingShapeProps)
register_element_cls("p:nvSpPr", CT_ShapeNonVisual)
register_element_cls("p:sp", CT_Shape)
from .shapes.connector import ( # noqa: E402
CT_Connection,
CT_Connector,
CT_ConnectorNonVisual,
CT_NonVisualConnectorProperties,
)
register_element_cls("a:endCxn", CT_Connection)
register_element_cls("a:stCxn", CT_Connection)
register_element_cls("p:cNvCxnSpPr", CT_NonVisualConnectorProperties)
register_element_cls("p:cxnSp", CT_Connector)
register_element_cls("p:nvCxnSpPr", CT_ConnectorNonVisual)
from .shapes.graphfrm import ( # noqa: E402
CT_GraphicalObject,
CT_GraphicalObjectData,
CT_GraphicalObjectFrame,
CT_GraphicalObjectFrameNonVisual,
CT_OleObject,
)
register_element_cls("a:graphic", CT_GraphicalObject)
register_element_cls("a:graphicData", CT_GraphicalObjectData)
register_element_cls("p:graphicFrame", CT_GraphicalObjectFrame)
register_element_cls("p:nvGraphicFramePr", CT_GraphicalObjectFrameNonVisual)
register_element_cls("p:oleObj", CT_OleObject)
from .shapes.groupshape import ( # noqa: E402
CT_GroupShape,
CT_GroupShapeNonVisual,
CT_GroupShapeProperties,
)
register_element_cls("p:grpSp", CT_GroupShape)
register_element_cls("p:grpSpPr", CT_GroupShapeProperties)
register_element_cls("p:nvGrpSpPr", CT_GroupShapeNonVisual)
register_element_cls("p:spTree", CT_GroupShape)
from .shapes.picture import CT_Picture, CT_PictureNonVisual # noqa: E402
register_element_cls("p:blipFill", CT_BlipFillProperties)
register_element_cls("p:nvPicPr", CT_PictureNonVisual)
register_element_cls("p:pic", CT_Picture)
from .shapes.shared import ( # noqa: E402
CT_ApplicationNonVisualDrawingProps,
CT_LineProperties,
CT_NonVisualDrawingProps,
CT_Placeholder,
CT_Point2D,
CT_PositiveSize2D,
CT_ShapeProperties,
CT_Transform2D,
)
register_element_cls("a:chExt", CT_PositiveSize2D)
register_element_cls("a:chOff", CT_Point2D)
register_element_cls("a:ext", CT_PositiveSize2D)
register_element_cls("a:ln", CT_LineProperties)
register_element_cls("a:off", CT_Point2D)
register_element_cls("a:xfrm", CT_Transform2D)
register_element_cls("c:spPr", CT_ShapeProperties)
register_element_cls("p:cNvPr", CT_NonVisualDrawingProps)
register_element_cls("p:nvPr", CT_ApplicationNonVisualDrawingProps)
register_element_cls("p:ph", CT_Placeholder)
register_element_cls("p:spPr", CT_ShapeProperties)
register_element_cls("p:xfrm", CT_Transform2D)
from .slide import ( # noqa: E402
CT_Background,
CT_BackgroundProperties,
CT_CommonSlideData,
CT_NotesMaster,
CT_NotesSlide,
CT_Slide,
CT_SlideLayout,
CT_SlideLayoutIdList,
CT_SlideLayoutIdListEntry,
CT_SlideMaster,
CT_SlideTiming,
CT_TimeNodeList,
CT_TLMediaNodeVideo,
)
register_element_cls("p:bg", CT_Background)
register_element_cls("p:bgPr", CT_BackgroundProperties)
register_element_cls("p:childTnLst", CT_TimeNodeList)
register_element_cls("p:cSld", CT_CommonSlideData)
register_element_cls("p:notes", CT_NotesSlide)
register_element_cls("p:notesMaster", CT_NotesMaster)
register_element_cls("p:sld", CT_Slide)
register_element_cls("p:sldLayout", CT_SlideLayout)
register_element_cls("p:sldLayoutId", CT_SlideLayoutIdListEntry)
register_element_cls("p:sldLayoutIdLst", CT_SlideLayoutIdList)
register_element_cls("p:sldMaster", CT_SlideMaster)
register_element_cls("p:timing", CT_SlideTiming)
register_element_cls("p:video", CT_TLMediaNodeVideo)
from .table import ( # noqa: E402
CT_Table,
CT_TableCell,
CT_TableCellProperties,
CT_TableCol,
CT_TableGrid,
CT_TableProperties,
CT_TableRow,
)
register_element_cls("a:gridCol", CT_TableCol)
register_element_cls("a:tbl", CT_Table)
register_element_cls("a:tblGrid", CT_TableGrid)
register_element_cls("a:tblPr", CT_TableProperties)
register_element_cls("a:tc", CT_TableCell)
register_element_cls("a:tcPr", CT_TableCellProperties)
register_element_cls("a:tr", CT_TableRow)
from .text import ( # noqa: E402
CT_RegularTextRun,
CT_TextBody,
CT_TextBodyProperties,
CT_TextCharacterProperties,
CT_TextField,
CT_TextFont,
CT_TextLineBreak,
CT_TextNormalAutofit,
CT_TextParagraph,
CT_TextParagraphProperties,
CT_TextSpacing,
CT_TextSpacingPercent,
CT_TextSpacingPoint,
)
register_element_cls("a:bodyPr", CT_TextBodyProperties)
register_element_cls("a:br", CT_TextLineBreak)
register_element_cls("a:defRPr", CT_TextCharacterProperties)
register_element_cls("a:endParaRPr", CT_TextCharacterProperties)
register_element_cls("a:fld", CT_TextField)
register_element_cls("a:latin", CT_TextFont)
register_element_cls("a:lnSpc", CT_TextSpacing)
register_element_cls("a:normAutofit", CT_TextNormalAutofit)
register_element_cls("a:r", CT_RegularTextRun)
register_element_cls("a:p", CT_TextParagraph)
register_element_cls("a:pPr", CT_TextParagraphProperties)
register_element_cls("c:rich", CT_TextBody)
register_element_cls("a:rPr", CT_TextCharacterProperties)
register_element_cls("a:spcAft", CT_TextSpacing)
register_element_cls("a:spcBef", CT_TextSpacing)
register_element_cls("a:spcPct", CT_TextSpacingPercent)
register_element_cls("a:spcPts", CT_TextSpacingPoint)
register_element_cls("a:txBody", CT_TextBody)
register_element_cls("c:txPr", CT_TextBody)
register_element_cls("p:txBody", CT_TextBody)
from .theme import CT_OfficeStyleSheet # noqa: E402
register_element_cls("a:theme", CT_OfficeStyleSheet)
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy for reward prediction and boltzmann exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Tuple, Sequence
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.networks import heteroscedastic_q_network
from tf_agents.bandits.policies import constraints as constr
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.distributions import shifted_categorical
from tf_agents.policies import tf_policy
from tf_agents.policies import utils as policy_utilities
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.typing import types
@gin.configurable
class BoltzmannRewardPredictionPolicy(tf_policy.TFPolicy):
"""Class to build Reward Prediction Policies with Boltzmann exploration."""
def __init__(self,
time_step_spec: types.TimeStep,
action_spec: types.NestedTensorSpec,
reward_network: types.Network,
temperature: types.FloatOrReturningFloat = 1.0,
boltzmann_gumbel_exploration_constant: Optional[
types.Float] = None,
observation_and_action_constraint_splitter: Optional[
types.Splitter] = None,
accepts_per_arm_features: bool = False,
constraints: Tuple[constr.NeuralConstraint, ...] = (),
emit_policy_info: Tuple[Text, ...] = (),
num_samples_list: Sequence[tf.Variable] = (),
name: Optional[Text] = None):
"""Builds a BoltzmannRewardPredictionPolicy given a reward network.
This policy takes a tf_agents.Network predicting rewards and chooses an
action with weighted probabilities (i.e., using a softmax over the network
estimates of value for each action).
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
reward_network: An instance of a `tf_agents.network.Network`,
callable via `network(observation, step_type) -> (output, final_state)`.
temperature: float or callable that returns a float. The temperature used
in the Boltzmann exploration.
boltzmann_gumbel_exploration_constant: optional positive float. When
provided, the policy implements Neural Bandit with Boltzmann-Gumbel
exploration from the paper:
N. Cesa-Bianchi et al., "Boltzmann Exploration Done Right", NIPS 2017.
observation_and_action_constraint_splitter: A function used for masking
valid/invalid actions with each state of the environment. The function
takes in a full observation and returns a tuple consisting of 1) the
part of the observation intended as input to the network and 2) the
mask. The mask should be a 0-1 `Tensor` of shape
`[batch_size, num_actions]`. This function should also work with a
`TensorSpec` as input, and should output `TensorSpec` objects for the
observation and mask.
accepts_per_arm_features: (bool) Whether the policy accepts per-arm
features.
constraints: iterable of constraints objects that are instances of
`tf_agents.bandits.agents.NeuralConstraint`.
emit_policy_info: (tuple of strings) what side information we want to get
as part of the policy info. Allowed values can be found in
`policy_utilities.PolicyInfo`.
num_samples_list: list or tuple of `tf.Variable` objects. Used only in
Boltzmann-Gumbel exploration; otherwise, empty.
name: The name of this policy. All variables in this module will fall
under that name. Defaults to the class name.
Raises:
NotImplementedError: If `action_spec` contains more than one
`BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
"""
policy_utilities.check_no_mask_with_arm_features(
accepts_per_arm_features, observation_and_action_constraint_splitter)
flat_action_spec = tf.nest.flatten(action_spec)
if len(flat_action_spec) > 1:
raise NotImplementedError(
'action_spec can only contain a single BoundedTensorSpec.')
self._temperature = temperature
action_spec = flat_action_spec[0]
if (not tensor_spec.is_bounded(action_spec) or
not tensor_spec.is_discrete(action_spec) or
action_spec.shape.rank > 1 or
action_spec.shape.num_elements() != 1):
raise NotImplementedError(
'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
'Found {}.'.format(action_spec))
self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
self._action_offset = action_spec.minimum
reward_network.create_variables()
self._reward_network = reward_network
self._constraints = constraints
self._boltzmann_gumbel_exploration_constant = (
boltzmann_gumbel_exploration_constant)
self._num_samples_list = num_samples_list
if self._boltzmann_gumbel_exploration_constant is not None:
if self._boltzmann_gumbel_exploration_constant <= 0.0:
raise ValueError(
'The Boltzmann-Gumbel exploration constant is expected to be ',
'positive. Found: ', self._boltzmann_gumbel_exploration_constant)
if self._action_offset > 0:
raise NotImplementedError('Action offset is not supported when ',
'Boltzmann-Gumbel exploration is enabled.')
if accepts_per_arm_features:
raise NotImplementedError(
'Boltzmann-Gumbel exploration is not supported ',
'for arm features case.')
if len(self._num_samples_list) != self._expected_num_actions:
raise ValueError(
'Size of num_samples_list: ', len(self._num_samples_list),
' does not match the expected number of actions:',
self._expected_num_actions)
self._emit_policy_info = emit_policy_info
predicted_rewards_mean = ()
if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
predicted_rewards_mean = tensor_spec.TensorSpec(
[self._expected_num_actions])
bandit_policy_type = ()
if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
bandit_policy_type = (
policy_utilities.create_bandit_policy_type_tensor_spec(shape=[1]))
if accepts_per_arm_features:
# The features for the chosen arm is saved to policy_info.
chosen_arm_features_info = (
policy_utilities.create_chosen_arm_features_info_spec(
time_step_spec.observation))
info_spec = policy_utilities.PerArmPolicyInfo(
predicted_rewards_mean=predicted_rewards_mean,
bandit_policy_type=bandit_policy_type,
chosen_arm_features=chosen_arm_features_info)
else:
info_spec = policy_utilities.PolicyInfo(
predicted_rewards_mean=predicted_rewards_mean,
bandit_policy_type=bandit_policy_type)
self._accepts_per_arm_features = accepts_per_arm_features
super(BoltzmannRewardPredictionPolicy, self).__init__(
time_step_spec, action_spec,
policy_state_spec=reward_network.state_spec,
clip=False,
info_spec=info_spec,
emit_log_probability='log_probability' in emit_policy_info,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter),
name=name)
@property
def accepts_per_arm_features(self):
return self._accepts_per_arm_features
def _variables(self):
policy_variables = self._reward_network.variables
for c in self._constraints:
policy_variables.append(c.variables)
return policy_variables
def _get_temperature_value(self):
if callable(self._temperature):
return self._temperature()
return self._temperature
def _distribution(self, time_step, policy_state):
observation = time_step.observation
if self.observation_and_action_constraint_splitter is not None:
observation, _ = self.observation_and_action_constraint_splitter(
observation)
predictions, policy_state = self._reward_network(
observation, time_step.step_type, policy_state)
batch_size = tf.shape(predictions)[0]
if isinstance(self._reward_network,
heteroscedastic_q_network.HeteroscedasticQNetwork):
predicted_reward_values = predictions.q_value_logits
else:
predicted_reward_values = predictions
predicted_reward_values.shape.with_rank_at_least(2)
predicted_reward_values.shape.with_rank_at_most(3)
if predicted_reward_values.shape[
-1] is not None and predicted_reward_values.shape[
-1] != self._expected_num_actions:
raise ValueError(
'The number of actions ({}) does not match the reward_network output'
' size ({}).'.format(self._expected_num_actions,
predicted_reward_values.shape[1]))
mask = constr.construct_mask_from_multiple_sources(
time_step.observation, self._observation_and_action_constraint_splitter,
self._constraints, self._expected_num_actions)
if self._boltzmann_gumbel_exploration_constant is not None:
logits = predicted_reward_values
# Apply masking if needed. Overwrite the logits for invalid actions to
# logits.dtype.min.
if mask is not None:
almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
logits = tf.compat.v2.where(
tf.cast(mask, tf.bool), logits, almost_neg_inf)
gumbel_dist = tfp.distributions.Gumbel(loc=0., scale=1.)
gumbel_samples = gumbel_dist.sample(tf.shape(logits))
num_samples_list_float = tf.stack(
[tf.cast(x.read_value(), tf.float32) for x in self._num_samples_list],
axis=-1)
exploration_weights = tf.math.divide_no_nan(
self._boltzmann_gumbel_exploration_constant,
tf.sqrt(num_samples_list_float))
final_logits = logits + exploration_weights * gumbel_samples
actions = tf.cast(
tf.math.argmax(final_logits, axis=1), self._action_spec.dtype)
# Log probability is not available in closed form. We treat this as a
# deterministic policy at the moment.
log_probability = tf.zeros([batch_size], tf.float32)
else:
# Apply the temperature scaling, needed for Boltzmann exploration.
logits = predicted_reward_values / self._get_temperature_value()
# Apply masking if needed. Overwrite the logits for invalid actions to
# logits.dtype.min.
if mask is not None:
almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
logits = tf.compat.v2.where(
tf.cast(mask, tf.bool), logits, almost_neg_inf)
if self._action_offset != 0:
distribution = shifted_categorical.ShiftedCategorical(
logits=logits,
dtype=self._action_spec.dtype,
shift=self._action_offset)
else:
distribution = tfp.distributions.Categorical(
logits=logits,
dtype=self._action_spec.dtype)
actions = distribution.sample()
log_probability = distribution.log_prob(actions)
bandit_policy_values = tf.fill([batch_size, 1],
policy_utilities.BanditPolicyType.BOLTZMANN)
if self._accepts_per_arm_features:
# Saving the features for the chosen action to the policy_info.
def gather_observation(obs):
return tf.gather(params=obs, indices=actions, batch_dims=1)
chosen_arm_features = tf.nest.map_structure(
gather_observation,
observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
policy_info = policy_utilities.PerArmPolicyInfo(
log_probability=log_probability if
policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
else (),
predicted_rewards_mean=(
predicted_reward_values if policy_utilities.InfoFields
.PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
bandit_policy_type=(bandit_policy_values
if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
in self._emit_policy_info else ()),
chosen_arm_features=chosen_arm_features)
else:
policy_info = policy_utilities.PolicyInfo(
log_probability=log_probability if
policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
else (),
predicted_rewards_mean=(
predicted_reward_values if policy_utilities.InfoFields
.PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
bandit_policy_type=(bandit_policy_values
if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
in self._emit_policy_info else ()))
return policy_step.PolicyStep(
tfp.distributions.Deterministic(loc=actions), policy_state, policy_info)
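# --- Illustrative sketch (not part of the original module) -------------------
# The constructor docstring above describes two exploration modes; the hedged
# sketch below restates the math with plain TF ops, outside the policy's
# actual code path. The reward values, per-action counts, and the exploration
# constant 2.0 are all hypothetical:
#   * Boltzmann exploration samples from softmax(predicted_rewards / temperature).
#   * Boltzmann-Gumbel exploration adds c / sqrt(N_a) * Gumbel noise per action a
#     (N_a = how often action a has been chosen so far) and takes the argmax.
# An observation_and_action_constraint_splitter, as described above, might look
# like `lambda obs: (obs['state'], obs['valid_action_mask'])` for a dict
# observation (hypothetical keys).
if __name__ == '__main__':
  example_rewards = tf.constant([[1.0, 2.0, 0.5]])  # hypothetical predicted rewards, batch of 1
  temperature = 10.0
  boltzmann_action = tfp.distributions.Categorical(
      logits=example_rewards / temperature).sample()
  num_samples = tf.constant([3.0, 1.0, 5.0])  # hypothetical per-action pull counts
  gumbel_noise = tfp.distributions.Gumbel(loc=0., scale=1.).sample(
      tf.shape(example_rewards))
  perturbed = example_rewards + (2.0 / tf.sqrt(num_samples)) * gumbel_noise
  gumbel_action = tf.argmax(perturbed, axis=1)
  print(boltzmann_action.numpy(), gumbel_action.numpy())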
|
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to visualize the unified scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import matplotlib
matplotlib.use("Agg") # Set headless-friendly backend.
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow.compat.v1 as tf
def heat_square(matrix, output_dir, name, xlabel, ylabel, max_val=None,
factor_names=None):
"""Plot values of a matrix.
Each entry is represented as a square whose size and color scale with its value.
Args:
matrix: Matrix of values to plot. Values should be in range [0, max_val].
output_dir: Where to save the image.
name: File name.
xlabel: Name of the x axis of the matrix.
ylabel: Name of the y axis of the matrix.
max_val: Maximum value acceptable in the matrix. If None, the max_val will
be set as the maximum value in the matrix.
factor_names: Names of the factors of variation.
"""
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
fig, _ = plt.subplots()
plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=1.2)
ax = plt.subplot(plot_grid[:, :-1])
if max_val is None:
max_val = np.max(matrix)
if max_val == 0:
max_val = 1.
else:
if max_val < np.max(matrix):
raise ValueError("The matrix has maximum value larger than max_val")
palette = sns.color_palette("Blues", 256)
# Estimate the area of the squares: the edge length is roughly the grid
# length in inches * points per inch, minus space for the axis names, times
# 14/15 (the last 1/15 of the figure is occupied by the colorbar legend).
size_scale = ((((ax.get_position().xmax - ax.get_position().xmin) *
fig.get_size_inches()[0] * fig.get_dpi() - 40) * 14/15*0.8) /
(matrix.shape[0]))**2
plot_matrix_squares(matrix, max_val, palette, size_scale, ax)
plt.xticks(range(matrix.shape[0]))
if factor_names is not None:
plt.yticks(range(matrix.shape[1]), factor_names)
else:
plt.yticks(range(matrix.shape[1]))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Add color legend on the right side of the plot.
ax = plt.subplot(plot_grid[:, -1])
plot_bar_palette(palette, max_val, ax)
if not tf.gfile.IsDirectory(output_dir):
tf.gfile.MakeDirs(output_dir)
output_path = os.path.join(output_dir, "{}.png".format(name))
with tf.gfile.Open(output_path, "wb") as path:
fig.savefig(path, bbox_inches="tight")
def plot_matrix_squares(matrix, max_val, palette, size_scale, ax):
"""Grid of squares where the size is proportional to the matrix values.
Args:
matrix: Matrix of values to plot.
max_val: Maximum value that is allowed in the matrix.
palette: Color palette.
size_scale: Maximum size of the squares.
ax: Axis of the subplot.
"""
tmp = pd.melt(pd.DataFrame(matrix).reset_index(), id_vars="index")
# The columns of the dataframe are: index, variable and value.
def to_color(val):
ind = int(val/max_val*255)
return palette[ind]
ax.scatter(x=tmp["index"], y=tmp["variable"],
s=size_scale * tmp["value"]/max_val, marker="s",
c=tmp["value"].apply(to_color))
ax.set_xticks([v+0.5 for v in range(matrix.shape[0])], minor=True)
ax.set_yticks([v+0.5 for v in range(matrix.shape[1])], minor=True)
ax.grid(False, "major")
ax.grid(True, "minor")
ax.set_xlim([-0.5, matrix.shape[0] - 0.5])
ax.set_ylim([-0.5, matrix.shape[1] - 0.5])
ax.tick_params(right=False, top=False, left=False, bottom=False)
ax.set_aspect(aspect=1.)
def plot_bar_palette(palette, max_val, ax):
"""Plot color bar legend."""
col_x = [0]*len(palette)
bar_y = np.linspace(0, max_val, len(palette))
bar_height = bar_y[1] - bar_y[0]
ax.barh(bar_y, np.array([5]*len(palette)), height=bar_height, left=col_x,
align="center", color=palette, linewidth=0)
ax.set_xlim(1, 2)
ax.set_ylim(0, max_val)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks(np.linspace(0, max_val, 3))
ax.yaxis.tick_right()
def plot_recovery_vs_independent(matrix, output_dir, name):
"""Plot how many factors are recovered and in how many independent groups.
Plot how many factors of variation are independently captured in a
representation at different thresholds. It takes as input a matrix
relating factors of variation and latent dimensions, sorts the elements and
then plots, for each threshold, (1) how many factors are discovered and (2)
how many factors are encoded independently in the representation.
Args:
matrix: Contains statistical relations between factors of variation and
latent codes.
output_dir: Output directory where to save the plot.
name: Filename of the plot.
"""
thresholds = np.sort(matrix.flatten())[::-1]
precisions = [precision(matrix, x) for x in thresholds]
recalls = [recall(matrix, x) for x in thresholds]
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
fig, ax = plt.subplots()
palette = sns.color_palette()
plt.plot(range(thresholds.shape[0]), precisions, label="Independent groups",
color=palette[0], linewidth=3)
plt.plot(range(thresholds.shape[0]), recalls, "--", label="Discovered",
color=palette[1], linewidth=3)
thresholds_ids = range(0, thresholds.shape[0], 10)
plt.xticks(thresholds_ids, np.around(thresholds[thresholds_ids], 2))
ax.set_ylim([0, matrix.shape[0]*1.1])
ax.tick_params(right=False, top=False, left=False, bottom=False)
ax.set_yticks(np.linspace(0, matrix.shape[0], matrix.shape[0]+1))
plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.25), ncol=2)
plt.xlabel("Threshold")
plt.ylabel("Number of Factors")
if not tf.gfile.IsDirectory(output_dir):
tf.gfile.MakeDirs(output_dir)
output_path = os.path.join(output_dir, name+".png")
with tf.gfile.Open(output_path, "wb") as path:
fig.savefig(path, bbox_inches="tight")
def precision(matrix, th):
"""How many independent components are discovered for a given threshold.
Args:
matrix: Adjacency matrix of shape (num_codes, num_factors) encoding the
statistical relations between factors and codes.
th: Eliminate all edges smaller than this threshold.
Returns:
Number of connected components.
"""
tmp = matrix.copy()
tmp[tmp < th] = 0
factors = np.zeros(tmp.shape[0])
codes = np.zeros(tmp.shape[1])
cc = 0
for i in range(len(factors)):
if factors[i] == 0:
to_visit = [(i, 0)]
factors, codes, size = bfs(tmp, to_visit, factors, codes, 1)
if size > 1:
cc += 1
return cc
def recall(matrix, th):
"""How many factors are discovered for a given threshold.
Counts how many factors of variation are captured in the representation.
First, we remove all edges in the adjacency matrix with weight smaller than
the threshold. Then, we count how many factors are connected to some codes.
Args:
matrix: Adjacency matrix for the graph.
th: Eliminate all edges smaller than this threshold.
Returns:
Number of discovered factors of variation for the given threshold.
"""
tmp = matrix.copy()
tmp[tmp < th] = 0
return np.sum(np.sum(tmp, axis=1) != 0)
def bfs(matrix, to_visit, factors, codes, size):
"""Traverse the matrix across connected components.
Implements breadth first search on an adjacency matrix. In our case, the
adjacency matrix encodes the statistical relations between factors of
variation and codes. This is used to traverse the adjacency matrix and
discover whether a factor is captured in multiple codes and whether there is a
path in the graph connecting two factors.
Args:
matrix: Adjacency matrix for the graph.
to_visit: Queue with the nodes to visit. We index the factors and codes in
the adjacency matrix and implement the queue with an array containing the
nodes that need to be visited.
factors: Array of shape (num_factors, ) with flags marking whether factors
of variation are visited.
codes: Array of shape (num_codes, ) with flags marking whether codes are
visited.
size: Counts how many nodes are in the same connected component.
Returns:
factors: Array of shape (num_factors, ) with flags marking whether factors
of variation are visited.
codes: Array of shape (num_codes, ) with flags marking whether codes are
visited.
size: How many nodes were visited.
"""
(current_node, flag) = to_visit.pop()
if flag == 0:
factors[current_node] = 1
for i in range(len(matrix[current_node, :])):
if matrix[current_node, i] != 0:
if codes[i] == 0:
to_visit.append((i, 1))
size += 1
factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
else:
codes[current_node] = 1
for i in range(len(matrix[:, current_node])):
if matrix[i, current_node] != 0:
if factors[i] == 0:
to_visit.append((i, 0))
size += 1
factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
return factors, codes, size
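# --- Illustrative sketch (not part of the original library) ------------------
# A hedged toy example of how precision() and recall() above traverse a small
# bipartite adjacency matrix between factors of variation and latent codes
# (entries are hypothetical relation strengths). heat_square(toy_matrix,
# "/tmp/example", "toy", "x", "y") would render the same matrix as sized and
# colored squares; it is omitted here to keep the sketch free of file output.
if __name__ == "__main__":
  toy_matrix = np.array([[0.9, 0.0, 0.1],
                         [0.8, 0.7, 0.0],
                         [0.0, 0.0, 0.0]])
  threshold = 0.5
  # recall(): rows that keep at least one edge >= the threshold (here 2).
  print("discovered factors:", recall(toy_matrix, threshold))
  # precision(): connected components of size > 1 after thresholding (here 1,
  # since rows 0 and 1 are linked through column 0 into a single group).
  print("independent groups:", precision(toy_matrix, threshold))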
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Vip(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer vip"""
def __init__(self, apiresource):
super(Vip, self).__init__(apiresource)
class Pool(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool"""
def __init__(self, apiresource):
if 'provider' not in apiresource:
apiresource['provider'] = None
super(Pool, self).__init__(apiresource)
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
def readable(self, request):
pFormatted = {'id': self.id,
'name': self.name,
'description': self.description,
'protocol': self.protocol,
'health_monitors': self.health_monitors,
'provider': self.provider}
try:
pFormatted['subnet_id'] = self.subnet_id
pFormatted['subnet_name'] = neutron.subnet_get(
request, self.subnet_id).cidr
except Exception:
pFormatted['subnet_id'] = self.subnet_id
pFormatted['subnet_name'] = self.subnet_id
if self.vip_id is not None:
try:
pFormatted['vip_id'] = self.vip_id
pFormatted['vip_name'] = vip_get(
request, self.vip_id).name
except Exception:
pFormatted['vip_id'] = self.vip_id
pFormatted['vip_name'] = self.vip_id
else:
pFormatted['vip_id'] = None
pFormatted['vip_name'] = None
return self.AttributeDict(pFormatted)
class Member(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer member"""
def __init__(self, apiresource):
super(Member, self).__init__(apiresource)
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
def readable(self, request):
mFormatted = {'id': self.id,
'address': self.address,
'protocol_port': self.protocol_port}
try:
mFormatted['pool_id'] = self.pool_id
mFormatted['pool_name'] = pool_get(
request, self.pool_id).name
except Exception:
mFormatted['pool_id'] = self.pool_id
mFormatted['pool_name'] = self.pool_id
return self.AttributeDict(mFormatted)
class PoolStats(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool stats"""
def __init__(self, apiresource):
super(PoolStats, self).__init__(apiresource)
class PoolMonitor(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron load balancer pool health monitor"""
def __init__(self, apiresource):
super(PoolMonitor, self).__init__(apiresource)
def vip_create(request, **kwargs):
"""Create a vip for a specified pool.
:param request: request context
:param address: virtual IP address
:param name: name for vip
:param description: description for vip
:param subnet_id: subnet_id for subnet of vip
:param protocol_port: transport layer port number for vip
:returns: Vip object
"""
body = {'vip': {'name': kwargs['name'],
'description': kwargs['description'],
'subnet_id': kwargs['subnet_id'],
'protocol_port': kwargs['protocol_port'],
'protocol': kwargs['protocol'],
'pool_id': kwargs['pool_id'],
'session_persistence': kwargs['session_persistence'],
'admin_state_up': kwargs['admin_state_up']
}}
if kwargs.get('connection_limit'):
body['vip']['connection_limit'] = kwargs['connection_limit']
if kwargs.get('address'):
body['vip']['address'] = kwargs['address']
vip = neutronclient(request).create_vip(body).get('vip')
return Vip(vip)
def vips_get(request, **kwargs):
vips = neutronclient(request).list_vips().get('vips')
return [Vip(v) for v in vips]
def vip_get(request, vip_id):
vip = neutronclient(request).show_vip(vip_id).get('vip')
return Vip(vip)
def vip_update(request, vip_id, **kwargs):
vip = neutronclient(request).update_vip(vip_id, kwargs).get('vip')
return Vip(vip)
def vip_delete(request, vip_id):
neutronclient(request).delete_vip(vip_id)
def pool_create(request, **kwargs):
"""Create a pool for specified protocol
:param request: request context
:param name: name for pool
:param description: description for pool
:param subnet_id: subnet_id for subnet of pool
:param protocol: load balanced protocol
:param lb_method: load balancer method
:param admin_state_up: admin state (default on)
"""
body = {'pool': {'name': kwargs['name'],
'description': kwargs['description'],
'subnet_id': kwargs['subnet_id'],
'protocol': kwargs['protocol'],
'lb_method': kwargs['lb_method'],
'admin_state_up': kwargs['admin_state_up'],
'provider': kwargs['provider'],
}}
pool = neutronclient(request).create_pool(body).get('pool')
return Pool(pool)
def pools_get(request, **kwargs):
pools = neutronclient(request).list_pools().get('pools')
return [Pool(p) for p in pools]
def pool_get(request, pool_id):
pool = neutronclient(request).show_pool(pool_id).get('pool')
return Pool(pool)
def pool_update(request, pool_id, **kwargs):
pool = neutronclient(request).update_pool(pool_id, kwargs).get('pool')
return Pool(pool)
def pool_delete(request, pool):
neutronclient(request).delete_pool(pool)
# not linked to UI yet
def pool_stats(request, pool_id, **kwargs):
stats = neutronclient(request).retrieve_pool_stats(pool_id, **kwargs)
return PoolStats(stats)
def pool_health_monitor_create(request, **kwargs):
"""Create a health monitor
:param request: request context
:param type: type of monitor
:param delay: delay of monitor
:param timeout: timeout of monitor
:param max_retries: max retries [1..10]
:param http_method: http method
:param url_path: url path
:param expected_codes: http return code
:param admin_state_up: admin state
"""
monitor_type = kwargs['type'].upper()
body = {'health_monitor': {'type': monitor_type,
'delay': kwargs['delay'],
'timeout': kwargs['timeout'],
'max_retries': kwargs['max_retries'],
'admin_state_up': kwargs['admin_state_up']
}}
if monitor_type in ['HTTP', 'HTTPS']:
body['health_monitor']['http_method'] = kwargs['http_method']
body['health_monitor']['url_path'] = kwargs['url_path']
body['health_monitor']['expected_codes'] = kwargs['expected_codes']
mon = neutronclient(request).create_health_monitor(body).get(
'health_monitor')
return PoolMonitor(mon)
def pool_health_monitors_get(request, **kwargs):
monitors = neutronclient(request
).list_health_monitors().get('health_monitors')
return [PoolMonitor(m) for m in monitors]
def pool_health_monitor_get(request, monitor_id):
monitor = neutronclient(request
).show_health_monitor(monitor_id
).get('health_monitor')
return PoolMonitor(monitor)
def pool_health_monitor_update(request, monitor_id, **kwargs):
monitor = neutronclient(request).update_health_monitor(monitor_id, kwargs)
return PoolMonitor(monitor)
def pool_health_monitor_delete(request, mon_id):
neutronclient(request).delete_health_monitor(mon_id)
def member_create(request, **kwargs):
"""Create a load balance member
:param request: request context
:param pool_id: pool_id of pool for member
:param address: IP address
:param protocol_port: transport layer port number
:param weight: weight for member
:param admin_state_up: admin_state
"""
body = {'member': {'pool_id': kwargs['pool_id'],
'address': kwargs['address'],
'protocol_port': kwargs['protocol_port'],
'admin_state_up': kwargs['admin_state_up']
}}
if kwargs.get('weight'):
body['member']['weight'] = kwargs['weight']
member = neutronclient(request).create_member(body).get('member')
return Member(member)
def members_get(request, **kwargs):
members = neutronclient(request).list_members().get('members')
return [Member(m) for m in members]
def member_get(request, member_id):
member = neutronclient(request).show_member(member_id).get('member')
return Member(member)
def member_update(request, member_id, **kwargs):
member = neutronclient(request).update_member(member_id, kwargs)
return Member(member)
def member_delete(request, mem_id):
neutronclient(request).delete_member(mem_id)
def pool_monitor_association_create(request, **kwargs):
"""Associate a health monitor with pool
:param request: request context
:param monitor_id: id of monitor
:param pool_id: id of pool
"""
body = {'health_monitor': {'id': kwargs['monitor_id'], }}
neutronclient(request).associate_health_monitor(
kwargs['pool_id'], body)
def pool_monitor_association_delete(request, **kwargs):
"""Disassociate a health monitor from pool
:param request: request context
:param monitor_id: id of monitor
:param pool_id: id of pool
"""
neutronclient(request).disassociate_health_monitor(
kwargs['pool_id'], kwargs['monitor_id'])
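# --- Illustrative sketch (not part of the original module) -------------------
# A hedged example of how the helpers above fit together into a full LBaaS v1
# setup. `request` is assumed to be a Django request object carrying an
# authenticated Neutron client; all names, ids, and addresses below are
# hypothetical placeholders. The function is purely illustrative and is not
# wired into the UI.
def example_lb_setup(request):
    pool = pool_create(request,
                       name='web-pool',
                       description='example pool',
                       subnet_id='SUBNET_ID',
                       protocol='HTTP',
                       lb_method='ROUND_ROBIN',
                       admin_state_up=True,
                       provider='haproxy')
    vip = vip_create(request,
                     name='web-vip',
                     description='example vip',
                     subnet_id='SUBNET_ID',
                     protocol='HTTP',
                     protocol_port=80,
                     pool_id=pool.id,
                     session_persistence={'type': 'HTTP_COOKIE'},
                     admin_state_up=True)
    member_create(request,
                  pool_id=pool.id,
                  address='10.0.0.10',
                  protocol_port=80,
                  admin_state_up=True)
    monitor = pool_health_monitor_create(request,
                                         type='http',
                                         delay=5,
                                         timeout=5,
                                         max_retries=3,
                                         http_method='GET',
                                         url_path='/',
                                         expected_codes='200',
                                         admin_state_up=True)
    pool_monitor_association_create(request,
                                    pool_id=pool.id,
                                    monitor_id=monitor.id)
    return pool, vip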
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests copying images to a Glance API server which uses a filesystem-
based storage backend.
"""
import hashlib
import httplib2
import json
import tempfile
import time
from glance.tests import functional
from glance.tests.functional.store_utils import (setup_http,
get_http_uri)
from glance.tests.utils import skip_if_disabled, requires
FIVE_KB = 5 * 1024
class TestCopyToFile(functional.FunctionalTest):
"""
Functional tests for copying images from the HTTP storage
backend to file
"""
def _do_test_copy_from(self, from_store, get_uri):
"""
Ensure we can copy from an external image in from_store.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
# POST /images with public image to be stored in from_store,
# to stand in for the 'external' image
image_data = "*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'external',
'X-Image-Meta-Store': from_store,
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
original_image_id = data['image']['id']
copy_from = get_uri(self, original_image_id)
# POST /images with public image copied from_store (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
copy_image_id = data['image']['id']
self.assertNotEqual(copy_image_id, original_image_id)
# GET image and make sure image content is as expected
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in xrange(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active')
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(response['content-length'], str(FIVE_KB))
self.assertEqual(content, "*" * FIVE_KB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_KB).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "copied")
# DELETE original image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
original_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
# GET image again to make sure the existence of the original
# image in from_store is not depended on
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(response['content-length'], str(FIVE_KB))
self.assertEqual(content, "*" * FIVE_KB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_KB).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "copied")
# DELETE copied image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_store(self):
"""
Ensure we can copy from an external image in HTTP store.
"""
self._do_test_copy_from('file', get_http_uri)
@skip_if_disabled
def _do_test_copy_from_http(self, exists):
"""
Ensure we can copy from an external image in HTTP.
:param exists: True iff the external source image exists
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
uri = get_http_uri(self, 'foobar')
copy_from = uri if exists else uri.replace('images', 'snafu')
# POST /images with public image copied from HTTP (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
copy_image_id = data['image']['id']
self.assertEqual(data['image']['status'], 'queued', content)
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in xrange(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active' if exists else 'killed')
# GET image and make sure image content is as expected
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200 if exists else 404)
if exists:
self.assertEqual(response['content-length'], str(FIVE_KB))
self.assertEqual(content, "*" * FIVE_KB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_KB).hexdigest())
# DELETE copied image
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_exists(self):
self._do_test_copy_from_http(True)
@skip_if_disabled
def test_copy_from_http_nonexistent(self):
self._do_test_copy_from_http(False)
@skip_if_disabled
def test_copy_from_file(self):
"""
Ensure we can't copy from file
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
with tempfile.NamedTemporaryFile() as image_file:
image_file.write("XXX")
image_file.flush()
copy_from = 'file://' + image_file.name
# POST /images with public image copied from file (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 400, content)
expected = 'External sourcing not supported for store ' + copy_from
msg = 'expected "%s" in "%s"' % (expected, content)
self.assertTrue(expected in content, msg)
self.stop_servers()
|
|
""" Cisco_IOS_XR_ha_eem_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ha\-eem package operational data.
This module contains definitions
for the following management objects\:
system\-monitoring\: Processes operational data
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class SystemMonitoring(object):
"""
Processes operational data
.. attribute:: cpu_utilization
Processes CPU utilization information
**type**\: list of :py:class:`CpuUtilization <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_oper.SystemMonitoring.CpuUtilization>`
"""
_prefix = 'ha-eem-oper'
_revision = '2015-01-07'
def __init__(self):
self.cpu_utilization = YList()
self.cpu_utilization.parent = self
self.cpu_utilization.name = 'cpu_utilization'
class CpuUtilization(object):
"""
Processes CPU utilization information
.. attribute:: node_name <key>
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: process_cpu
Per process CPU utilization
**type**\: list of :py:class:`ProcessCpu <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_oper.SystemMonitoring.CpuUtilization.ProcessCpu>`
.. attribute:: total_cpu_fifteen_minute
Total CPU utilization in past 15 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: total_cpu_five_minute
Total CPU utilization in past 5 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: total_cpu_one_minute
Total CPU utilization in past 1 minute
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ha-eem-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.node_name = None
self.process_cpu = YList()
self.process_cpu.parent = self
self.process_cpu.name = 'process_cpu'
self.total_cpu_fifteen_minute = None
self.total_cpu_five_minute = None
self.total_cpu_one_minute = None
class ProcessCpu(object):
"""
Per process CPU utilization
.. attribute:: process_cpu_fifteen_minute
Process CPU utilization in percent for past 15 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: process_cpu_five_minute
Process CPU utilization in percent for past 5 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: process_cpu_one_minute
Process CPU utilization in percent for past 1 minute
**type**\: int
**range:** 0..4294967295
.. attribute:: process_id
Process ID
**type**\: int
**range:** 0..4294967295
.. attribute:: process_name
Process name
**type**\: str
"""
_prefix = 'ha-eem-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.process_cpu_fifteen_minute = None
self.process_cpu_five_minute = None
self.process_cpu_one_minute = None
self.process_id = None
self.process_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ha-eem-oper:process-cpu'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.process_cpu_fifteen_minute is not None:
return True
if self.process_cpu_five_minute is not None:
return True
if self.process_cpu_one_minute is not None:
return True
if self.process_id is not None:
return True
if self.process_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_oper as meta
return meta._meta_table['SystemMonitoring.CpuUtilization.ProcessCpu']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-ha-eem-oper:system-monitoring/Cisco-IOS-XR-ha-eem-oper:cpu-utilization[Cisco-IOS-XR-ha-eem-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node_name is not None:
return True
if self.process_cpu is not None:
for child_ref in self.process_cpu:
if child_ref._has_data():
return True
if self.total_cpu_fifteen_minute is not None:
return True
if self.total_cpu_five_minute is not None:
return True
if self.total_cpu_one_minute is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_oper as meta
return meta._meta_table['SystemMonitoring.CpuUtilization']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-oper:system-monitoring'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.cpu_utilization is not None:
for child_ref in self.cpu_utilization:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_oper as meta
return meta._meta_table['SystemMonitoring']['meta_info']
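# --- Illustrative sketch (not part of the generated bindings) ----------------
# A hedged example of how these generated classes are typically instantiated
# and linked together before being handed to a ydk service provider; the node
# name and numbers below are hypothetical placeholders.
if __name__ == '__main__':
    system_monitoring = SystemMonitoring()
    cpu_util = SystemMonitoring.CpuUtilization()
    cpu_util.parent = system_monitoring
    cpu_util.node_name = '0/RP0/CPU0'
    cpu_util.total_cpu_one_minute = 12
    system_monitoring.cpu_utilization.append(cpu_util)
    proc = SystemMonitoring.CpuUtilization.ProcessCpu()
    proc.parent = cpu_util
    proc.process_name = 'example-proc'
    proc.process_id = 1234
    proc.process_cpu_one_minute = 3
    cpu_util.process_cpu.append(proc)
    # _common_path derives each instance's YANG XPath from its key leaf and its
    # parent chain; is_config() is False because this model is operational data.
    print(cpu_util._common_path)
    print(proc._common_path)
    print(cpu_util.is_config())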
|
|
#!/usr/bin/env python
'''
brozzler-easy - brozzler-worker, warcprox, pywb, and brozzler-dashboard all
working together in a single process
Copyright (C) 2016 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import logging
try:
import warcprox
import warcprox.main
import pywb
import brozzler.pywb
import wsgiref.simple_server
import wsgiref.handlers
import brozzler.dashboard
except ImportError as e:
logging.critical(
'%s: %s\n\nYou might need to run "pip install '
'brozzler[easy]".\nSee README.rst for more information.',
type(e).__name__, e)
sys.exit(1)
import argparse
import brozzler
import brozzler.cli
import os
import socket
import signal
import threading
import time
import doublethink
import traceback
import socketserver
def _build_arg_parser(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
formatter_class=brozzler.cli.BetterArgumentDefaultsHelpFormatter,
prog=os.path.basename(argv[0]), description=(
'brozzler-easy - easy deployment of brozzler, with '
'brozzler-worker, warcprox, pywb, and brozzler-dashboard all '
'running in a single process'))
# common args
brozzler.cli.add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'-d', '--warcs-dir', dest='warcs_dir', default='./warcs',
help='where to write warcs')
# warcprox args
arg_parser.add_argument(
'-c', '--cacert', dest='cacert',
default='./%s-warcprox-ca.pem' % socket.gethostname(),
help=(
'warcprox CA certificate file; if file does not exist, it '
'will be created'))
arg_parser.add_argument(
'--certs-dir', dest='certs_dir',
default='./%s-warcprox-ca' % socket.gethostname(),
help='where warcprox will store and load generated certificates')
arg_parser.add_argument(
'--onion-tor-socks-proxy', dest='onion_tor_socks_proxy',
default=None, help=(
'host:port of tor socks proxy, used only to connect to '
'.onion sites'))
# brozzler-worker args
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=brozzler.cli.suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'-n', '--max-browsers', dest='max_browsers',
type=int, default=1, help=(
'max number of chrome instances simultaneously '
'browsing pages'))
# pywb args
arg_parser.add_argument(
'--pywb-address', dest='pywb_address',
default='0.0.0.0',
help='pywb wayback address to listen on')
arg_parser.add_argument(
'--pywb-port', dest='pywb_port', type=int,
default=8880, help='pywb wayback port')
# dashboard args
arg_parser.add_argument(
'--dashboard-address', dest='dashboard_address',
default='localhost',
help='brozzler dashboard address to listen on')
arg_parser.add_argument(
'--dashboard-port', dest='dashboard_port',
type=int, default=8881, help='brozzler dashboard port')
# common at the bottom args
brozzler.cli.add_common_options(arg_parser, argv)
return arg_parser
class ThreadingWSGIServer(
socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
pass
class BrozzlerEasyController:
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, args):
self.stop = threading.Event()
self.args = args
self.warcprox_controller = warcprox.main.init_controller(
self._warcprox_args(args))
self.brozzler_worker = self._init_brozzler_worker(args)
self.pywb_httpd = self._init_pywb(args)
self.dashboard_httpd = self._init_brozzler_dashboard(args)
def _init_brozzler_dashboard(self, args):
return wsgiref.simple_server.make_server(
args.dashboard_address, args.dashboard_port,
brozzler.dashboard.app, ThreadingWSGIServer)
def _init_brozzler_worker(self, args):
rr = doublethink.Rethinker(
args.rethinkdb_servers.split(","), args.rethinkdb_db)
frontier = brozzler.RethinkDbFrontier(rr)
service_registry = doublethink.ServiceRegistry(rr)
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, chrome_exe=args.chrome_exe,
proxy='%s:%s' % self.warcprox_controller.proxy.server_address,
max_browsers=args.max_browsers)
return worker
def _init_pywb(self, args):
brozzler.pywb.TheGoodUrlCanonicalizer.replace_default_canonicalizer()
brozzler.pywb.TheGoodUrlCanonicalizer.monkey_patch_dsrules_init()
brozzler.pywb.support_in_progress_warcs()
brozzler.pywb.monkey_patch_wburl()
brozzler.pywb.monkey_patch_fuzzy_query()
brozzler.pywb.monkey_patch_calc_search_range()
if args.warcs_dir.endswith('/'):
warcs_dir = args.warcs_dir
else:
warcs_dir = args.warcs_dir + '/'
conf = {
'collections': {
'brozzler': {
'index_paths': brozzler.pywb.RethinkCDXSource(
servers=args.rethinkdb_servers.split(","),
db=args.rethinkdb_db, table='captures')
},
},
# 'enable_http_proxy': True,
# 'enable_memento': True,
'archive_paths': warcs_dir,
'enable_cdx_api': True,
'framed_replay': True,
'port': args.pywb_port,
'enable_auto_colls': False,
}
wsgi_app = pywb.framework.wsgi_wrappers.init_app(
pywb.webapp.pywb_init.create_wb_router, config=conf,
load_yaml=False)
# disable is_hop_by_hop restrictions
wsgiref.handlers.is_hop_by_hop = lambda x: False
return wsgiref.simple_server.make_server(
args.pywb_address, args.pywb_port, wsgi_app,
ThreadingWSGIServer)
def start(self):
self.logger.info('starting warcprox')
self.warcprox_controller.start()
# XXX wait til fully started?
self.logger.info('starting brozzler-worker')
self.brozzler_worker.start()
self.logger.info(
'starting pywb at %s:%s', *self.pywb_httpd.server_address)
threading.Thread(target=self.pywb_httpd.serve_forever).start()
self.logger.info(
'starting brozzler-dashboard at %s:%s',
*self.dashboard_httpd.server_address)
threading.Thread(target=self.dashboard_httpd.serve_forever).start()
def shutdown(self):
self.logger.info('shutting down brozzler-dashboard')
self.dashboard_httpd.shutdown()
self.logger.info('shutting down brozzler-worker')
self.brozzler_worker.shutdown_now()
# brozzler-worker is fully shut down at this point
self.logger.info('shutting down pywb')
self.pywb_httpd.shutdown()
self.logger.info('shutting down warcprox')
self.warcprox_controller.shutdown()
def wait_for_shutdown_request(self):
try:
while not self.stop.is_set():
time.sleep(0.5)
finally:
self.shutdown()
def _warcprox_args(self, args):
'''
Takes args as produced by the argument parser built by
_build_arg_parser and builds warcprox arguments object suitable to pass
to warcprox.main.init_controller. Copies some arguments, renames some,
populates some with defaults appropriate for brozzler-easy, etc.
'''
warcprox_args = argparse.Namespace()
warcprox_args.address = 'localhost'
# let the OS choose an available port; discover it later using
# sock.getsockname()[1]
warcprox_args.port = 0
warcprox_args.cacert = args.cacert
warcprox_args.certs_dir = args.certs_dir
warcprox_args.directory = args.warcs_dir
warcprox_args.gzip = True
warcprox_args.prefix = 'brozzler'
warcprox_args.size = 1000 * 1000 * 1000
warcprox_args.rollover_idle_time = 3 * 60
warcprox_args.digest_algorithm = 'sha1'
warcprox_args.base32 = True
warcprox_args.stats_db_file = None
warcprox_args.playback_port = None
warcprox_args.playback_index_db_file = None
warcprox_args.rethinkdb_servers = args.rethinkdb_servers
warcprox_args.rethinkdb_db = args.rethinkdb_db
warcprox_args.rethinkdb_big_table = True
warcprox_args.kafka_broker_list = None
warcprox_args.kafka_capture_feed_topic = None
warcprox_args.queue_size = 500
warcprox_args.max_threads = None
warcprox_args.profile = False
warcprox_args.onion_tor_socks_proxy = args.onion_tor_socks_proxy
return warcprox_args
def dump_state(self, signum=None, frame=None):
state_strs = []
for th in threading.enumerate():
state_strs.append(str(th))
stack = traceback.format_stack(sys._current_frames()[th.ident])
state_strs.append(''.join(stack))
logging.warn('dumping state (caught signal {})\n{}'.format(
signum, '\n'.join(state_strs)))
def main(argv=None):
argv = argv or sys.argv
arg_parser = _build_arg_parser(argv)
args = arg_parser.parse_args(args=argv[1:])
brozzler.cli.configure_logging(args)
controller = BrozzlerEasyController(args)
signal.signal(signal.SIGTERM, lambda a,b: controller.stop.set())
signal.signal(signal.SIGINT, lambda a,b: controller.stop.set())
signal.signal(signal.SIGQUIT, controller.dump_state)
controller.start()
controller.wait_for_shutdown_request()
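# --- Illustrative note (not part of the original script) ---------------------
# main() ties the pieces together: it parses the flags defined in
# _build_arg_parser(), configures logging, builds a BrozzlerEasyController
# (warcprox + brozzler-worker + pywb + dashboard in one process), installs
# signal handlers, and blocks until SIGTERM/SIGINT. A hedged example of calling
# it programmatically with a couple of the flags defined above (values are
# hypothetical):
#
#     main(['brozzler-easy', '--max-browsers', '2', '--warcs-dir', './warcs'])
#
# The usual console entry point would be:
if __name__ == '__main__':
    main()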
|
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import coop_cms.settings
import coop_cms.models
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Alias',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('path', models.CharField(max_length=200)),
('redirect_url', models.CharField(default=b'', max_length=200, blank=True)),
],
options={
'verbose_name': 'Alias',
'verbose_name_plural': 'Aliases',
},
),
migrations.CreateModel(
name='ArticleCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('slug', django_extensions.db.fields.AutoSlugField(editable=False, populate_from='name', max_length=100, blank=True, unique=True)),
('ordering', models.IntegerField(default=0, verbose_name='ordering')),
('in_rss', models.BooleanField(default=False, help_text='The articles of this category will be listed in the main rss feed', verbose_name='in rss')),
('sites', models.ManyToManyField(default=[1], to='sites.Site', verbose_name='site')),
],
options={
'verbose_name': 'article category',
'verbose_name_plural': 'article categories',
},
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('name', models.CharField(default=b'', max_length=200, verbose_name='name', blank=True)),
('ordering', models.IntegerField(default=100, verbose_name='ordering')),
('file', models.FileField(upload_to=coop_cms.models.get_doc_folder, verbose_name='file')),
('is_private', models.BooleanField(default=False, help_text='Check this if you do not want to publish this document to all users', verbose_name='is private')),
('category', models.ForeignKey(default=None, blank=True, to='coop_cms.ArticleCategory', null=True, verbose_name='category', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'document',
'verbose_name_plural': 'documents',
},
),
migrations.CreateModel(
name='Fragment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name', db_index=True)),
('css_class', models.CharField(default='', max_length=100, verbose_name='CSS class', blank=True)),
('position', models.IntegerField(default=0, verbose_name='position')),
('content', models.TextField(default='', verbose_name='content', blank=True)),
],
options={
'ordering': ('position', 'id'),
'verbose_name': 'Fragment',
'verbose_name_plural': 'Fragment',
},
),
migrations.CreateModel(
name='FragmentFilter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('extra_id', models.CharField(max_length=100, verbose_name='extra_id', db_index=True)),
],
options={
'verbose_name': 'Fragment filter',
'verbose_name_plural': 'Fragment filters',
},
),
migrations.CreateModel(
name='FragmentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name', db_index=True)),
('allowed_css_classes', models.CharField(default='', help_text='the css classes proposed when editing a fragment. They must be separated by commas', max_length=200, verbose_name='allowed css classes')),
],
options={
'verbose_name': 'Fragment type',
'verbose_name_plural': 'Fragment types',
},
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('name', models.CharField(default='', max_length=200, verbose_name='name', blank=True)),
('ordering', models.IntegerField(default=100, verbose_name='ordering')),
('file', models.ImageField(upload_to=coop_cms.settings.get_img_folder, verbose_name='file')),
],
options={
'verbose_name': 'image',
'verbose_name_plural': 'images',
},
),
migrations.CreateModel(
name='ImageSize',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('size', models.CharField(max_length=100, verbose_name='size')),
('crop', models.CharField(default='', max_length=100, verbose_name='crop', blank=True)),
],
options={
'verbose_name': 'Image size',
'verbose_name_plural': 'Image sizes',
},
),
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('title', models.CharField(default='title', max_length=200, verbose_name='Title')),
('url', models.CharField(max_length=200, verbose_name='URL')),
],
options={
'verbose_name': 'link',
'verbose_name_plural': 'links',
},
),
migrations.CreateModel(
name='MediaFilter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
],
options={
'verbose_name': 'media filter',
'verbose_name_plural': 'media filters',
},
),
migrations.CreateModel(
name='NavNode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=200, verbose_name='label')),
('ordering', models.PositiveIntegerField(default=0, verbose_name='ordering')),
('object_id', models.PositiveIntegerField(null=True, verbose_name='object id', blank=True)),
('in_navigation', models.BooleanField(default=True, verbose_name='in navigation')),
('content_type', models.ForeignKey(verbose_name='content_type', blank=True, to='contenttypes.ContentType', null=True, on_delete=models.CASCADE)),
('parent', models.ForeignKey(default=0, blank=True, to='coop_cms.NavNode', null=True, verbose_name='parent', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'navigation node',
'verbose_name_plural': 'navigation nodes',
},
),
migrations.CreateModel(
name='NavTree',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('last_update', models.DateTimeField(auto_now=True)),
('name', models.CharField(default='default', unique=True, max_length=100, verbose_name='name', db_index=True)),
],
options={
'abstract': False,
'verbose_name': 'Navigation tree',
'verbose_name_plural': 'Navigation trees',
},
),
migrations.CreateModel(
name='NavType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('search_field', models.CharField(default='', max_length=200, verbose_name='search field', blank=True)),
('label_rule', models.IntegerField(default=0, verbose_name='How to generate the label', choices=[(0, 'Use object unicode'), (1, 'Use search field'), (2, 'Use get_label')])),
('content_type', models.OneToOneField(verbose_name='django model', to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'navigable type',
'verbose_name_plural': 'navigable types',
},
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(default='', max_length=200, verbose_name='subject', blank=True)),
('content', models.TextField(default='<br>', verbose_name='content', blank=True)),
('template', models.CharField(default='', max_length=200, verbose_name='template', blank=True)),
('source_url', models.URLField(default='', verbose_name='source url', blank=True)),
('is_public', models.BooleanField(default=False, verbose_name='is public')),
],
options={
'verbose_name': 'newsletter',
'verbose_name_plural': 'newsletters',
},
),
migrations.CreateModel(
name='NewsletterItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField(verbose_name='object id')),
('ordering', models.IntegerField(default=0, verbose_name='ordering')),
('content_type', models.ForeignKey(verbose_name='content_type', to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
'ordering': ['ordering'],
'verbose_name': 'newsletter item',
'verbose_name_plural': 'newsletter items',
},
),
migrations.CreateModel(
name='NewsletterSending',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('scheduling_dt', models.DateTimeField(default=None, null=True, verbose_name='scheduling date', blank=True)),
('sending_dt', models.DateTimeField(default=None, null=True, verbose_name='sending date', blank=True)),
('newsletter', models.ForeignKey(to='coop_cms.Newsletter', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'newsletter sending',
'verbose_name_plural': 'newsletter sendings',
},
),
migrations.CreateModel(
name='PieceOfHtml',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('div_id', models.CharField(max_length=100, verbose_name='identifier', db_index=True)),
('content', models.TextField(default='', verbose_name='content', blank=True)),
('extra_id', models.CharField(default='', max_length=100, verbose_name='extra identifier', db_index=True, blank=True)),
],
options={
'verbose_name': 'piece of HTML',
'verbose_name_plural': 'pieces of HTML',
},
),
migrations.CreateModel(
name='SiteSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('homepage_url', models.CharField(default='', help_text='if set, the homepage will be redirected to the given URL', max_length=256, verbose_name='homepage URL', blank=True)),
('sitemap_mode', models.IntegerField(default=1, choices=[(1, 'Only site articles'), (2, 'All articles')])),
('site', models.OneToOneField(verbose_name='site settings', to='sites.Site', on_delete=models.CASCADE)),
],
options={
'ordering': ('site__id',),
'verbose_name': 'Site settings',
'verbose_name_plural': 'Sites settings',
},
),
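# Relations between the models created above are added afterwards as separate AddField operations.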
migrations.AddField(
model_name='newsletter',
name='items',
field=models.ManyToManyField(to='coop_cms.NewsletterItem', blank=True),
),
migrations.AddField(
model_name='newsletter',
name='site',
field=models.ForeignKey(default=1, verbose_name='site', to='sites.Site', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='navtree',
name='types',
field=models.ManyToManyField(related_name='coop_cms_navtree_set', to='coop_cms.NavType', blank=True),
),
migrations.AddField(
model_name='navnode',
name='tree',
field=models.ForeignKey(verbose_name='tree', to='coop_cms.NavTree', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='image',
name='filters',
field=models.ManyToManyField(default=None, to='coop_cms.MediaFilter', verbose_name='filters', blank=True),
),
migrations.AddField(
model_name='image',
name='size',
field=models.ForeignKey(default=None, blank=True, to='coop_cms.ImageSize', null=True, verbose_name='size', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='fragment',
name='filter',
field=models.ForeignKey(default=None, blank=True, to='coop_cms.FragmentFilter', null=True, verbose_name='fragment filter', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='fragment',
name='type',
field=models.ForeignKey(verbose_name='fragment type', to='coop_cms.FragmentType', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='document',
name='filters',
field=models.ManyToManyField(default=None, to='coop_cms.MediaFilter', verbose_name='filters', blank=True),
),
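# A given object (content_type, object_id) may be attached to the newsletter items only once.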
migrations.AlterUniqueTogether(
name='newsletteritem',
unique_together=set([('content_type', 'object_id')]),
),
]
|